/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
#include <asm/cpu-features.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
				 _page_cachable_default)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
				 _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't enforce execute permission
 * separately, so execute is treated the same as read. Also, write
 * permissions imply read permissions. This is the closest we can get
 * by reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c;
 * the real values will be generated at runtime.
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */
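/*
 * On CPUs with virtually aliasing data caches there is one zero page
 * per cache colour; zero_page_mask below selects the zero page whose
 * colour matches the mapped virtual address, which is why the
 * architecture advertises __HAVE_COLOR_ZERO_PAGE.
 */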

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

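/*
 * htw_stop()/htw_start() bracket code that modifies page tables while
 * the hardware page table walker (HTW) could be active.  They nest: a
 * per-CPU counter (htw_seq) is incremented on stop and decremented on
 * start, and the walker is only disabled/re-enabled (via the PWCtl
 * PWEn bit) on the 0 <-> 1 transitions, with interrupts disabled
 * around the sequence update.
 */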
#define htw_stop()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while (0)

#define htw_start()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while (0)

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#ifdef CONFIG_XPA
# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#else
# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#endif

#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)

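/*
 * A MIPS TLB entry maps an even/odd pair of virtual pages through
 * EntryLo0/EntryLo1, and the entry's global (G) bit is the AND of the
 * G bits of the pair, so _PAGE_GLOBAL must be kept consistent across
 * both PTEs of a buddy pair.  The high word is stored first; the low
 * word, which carries _PAGE_PRESENT, is stored last behind smp_wmb()
 * so a concurrent walker never sees a half-written but present entry.
 */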
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

#ifdef CONFIG_XPA
	if (pte.pte_high & _PAGE_GLOBAL) {
#else
	if (pte.pte_low & _PAGE_GLOBAL) {
#endif
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it had better already be global).
		 */
		if (pte_none(*buddy)) {
			if (!IS_ENABLED(CONFIG_XPA))
				buddy->pte_low |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (IS_ENABLED(CONFIG_XPA)) {
		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
			null.pte_high = _PAGE_GLOBAL;
	} else {
		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
			null.pte_low = null.pte_high = _PAGE_GLOBAL;
	}

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3K_TLB)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it had better already be global).  The cmpxchg() only
		 * marks the buddy global when it is still none (zero).
		 */
# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
		cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
# else
		cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
# endif
	}
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3K_TLB)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
#endif

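/*
 * Install a PTE and keep the data cache coherent: __update_cache()
 * only needs to run when a present mapping for a (potentially) new
 * physical page is being established; clearing a PTE or re-setting
 * the same pfn requires no cache maintenance.
 */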
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	extern void __update_cache(unsigned long address, pte_t pte);

	if (!pte_present(pteval))
		goto cache_sync_done;

	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
		goto cache_sync_done;

	__update_cache(addr, pteval);
cache_sync_done:
	set_pte(ptep, pteval);
}

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with a size, but gcc 3.3 and older
 * cannot determine that the expression is a constant, so the size is
 * dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * Platform-specific pte_special() and pte_mkspecial() definitions
 * are required only when ARCH_HAS_PTE_SPECIAL is enabled.
 */
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_special(pte_t pte)
{
	return pte.pte_low & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte.pte_low |= _PAGE_SPECIAL;
	return pte;
}
#else
static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}
#endif
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
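/*
 * _PAGE_SILENT_READ and _PAGE_SILENT_WRITE correspond to the hardware
 * valid and dirty bits of the TLB entry.  MIPS has no hardware-managed
 * accessed/dirty tracking, so these bits are only set once software
 * has marked the page accessed/modified; until then the first read or
 * write takes a fault that is resolved from the page tables.
 */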
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_WRITE;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_MODIFIED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_ACCESSED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_READ;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (!(pte.pte_low & _PAGE_NO_READ)) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#endif

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}

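/*
 * Typical use, e.g. in a driver's mmap() implementation mapping a
 * device region (an illustrative sketch, not code from this file):
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */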
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

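/*
 * With 64-bit physical addresses on a 32-bit kernel the PTE is split
 * in two words.  Under XPA the extended pfn bits (_PFNX_MASK) live in
 * pte_low alongside the protection bits, while pte_high carries the
 * main pfn and cache attribute bits, so pte_modify() must merge the
 * new protection into both words without disturbing either pfn part.
 */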
#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
#endif

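/*
 * update_mmu_cache() is called by the generic MM code after a PTE has
 * been installed, giving the architecture a chance to preload the new
 * translation into the TLB.
 */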
extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;

	__update_tlb(vma, address, pte);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_PHYS_ADDR_T_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		unsigned long pfn, unsigned long size, pgprot_t prot);

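/*
 * On 32-bit platforms with a 64-bit phys_addr_t, fixup_bigphys_addr()
 * is a platform-provided hook that rewrites the physical address of a
 * device mapping into the CPU's extended physical address space before
 * the pages are remapped.
 */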
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);

	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits; generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

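/*
 * For a huge (leaf) pmd the struct page is derived from the pfn in the
 * entry itself; otherwise the pmd holds the virtual address of a page
 * table, and we return the page backing that table.
 */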
static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version of pmdp_huge_get_and_clear uses a version of
 * pmd_clear() with a different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

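/*
 * A "leaf" entry maps a huge page directly instead of pointing to a
 * lower-level page table.
 */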
#ifdef _PAGE_HUGE
#define pmd_leaf(pmd)	((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)	((pud_val(pud) & _PAGE_HUGE) != 0)
#endif

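/*
 * gup_fast_permitted() gates the lockless get_user_pages_fast() path;
 * on MIPS it is disabled whenever the data cache suffers from virtual
 * aliasing.
 */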
#define gup_fast_permitted(start, end)	(!cpu_has_dc_aliases)

#include <asm-generic/pgtable.h>

/*
 * We provide our own get_unmapped_area() to cope with the virtual
 * aliasing constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_PGTABLE_H */