/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
				 _page_cachable_default)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
				 _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read. Also, write
 * permissions imply read permissions. This is the closest we can get
 * by reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c
 * The real values will be generated at runtime
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
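
/*
 * The real protection values cannot be known until the CPU has been
 * probed (cachability and RIXI support vary by core), so the table is
 * filled in at boot time; see setup_protection_map() in
 * arch/mips/mm/cache.c.
 */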

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
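
/*
 * zero_page_mask implements the "color": empty_zero_page is backed by
 * several contiguous zero-filled pages, and the low virtual address
 * bits of the faulting address select the copy whose cache color
 * matches, avoiding dcache aliases. For example, with 4K pages and
 * four cache colors, zero_page_mask would be 0x3000.
 */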

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#define htw_stop()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while (0)

#define htw_start()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while (0)
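
/*
 * htw_stop()/htw_start() nest: htw_seq counts outstanding stops per
 * CPU, so only the outermost htw_stop() actually clears PWCtl.PWEn
 * and only the matching outermost htw_start() re-enables the hardware
 * page table walker. Typical use brackets a software page table
 * update:
 *
 *	htw_stop();
 *	... modify the page tables ...
 *	htw_start();
 */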

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#ifdef CONFIG_XPA
# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#else
# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#endif

#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

#ifdef CONFIG_XPA
	if (pte.pte_high & _PAGE_GLOBAL) {
#else
	if (pte.pte_low & _PAGE_GLOBAL) {
#endif
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			if (!IS_ENABLED(CONFIG_XPA))
				buddy->pte_low |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}
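
/*
 * ptep_buddy() pairs the even/odd PTEs that share one TLB entry: a
 * MIPS TLB entry maps two consecutive virtual pages via EntryLo0 and
 * EntryLo1, and the hardware only honours the global (G) bit when it
 * is set in both halves. That is why set_pte() above propagates
 * _PAGE_GLOBAL to a still-none buddy.
 */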

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (IS_ENABLED(CONFIG_XPA)) {
		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
			null.pte_high = _PAGE_GLOBAL;
	} else {
		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
			null.pte_low = null.pte_high = _PAGE_GLOBAL;
	}

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
#ifdef CONFIG_SMP
		/*
		 * For SMP, multiple CPUs can race, so we need to do
		 * this atomically.
		 */
		unsigned long page_global = _PAGE_GLOBAL;
		unsigned long tmp;

		if (kernel_uses_llsc && R10000_LLSC_WAR) {
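			/*
			 * Early R10000 silicon requires the branch
			 * after sc to be branch-likely (beqzl); see
			 * R10000_LLSC_WAR.
			 */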
			__asm__ __volatile__ (
			"	.set	push				\n"
			"	.set	arch=r4000			\n"
			"	.set	noreorder			\n"
			"1:"	__LL	"%[tmp], %[buddy]		\n"
			"	bnez	%[tmp], 2f			\n"
			"	 or	%[tmp], %[tmp], %[global]	\n"
				__SC	"%[tmp], %[buddy]		\n"
			"	beqzl	%[tmp], 1b			\n"
			"	nop					\n"
			"2:						\n"
			"	.set	pop				\n"
			: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
			: [global] "r" (page_global));
		} else if (kernel_uses_llsc) {
			__asm__ __volatile__ (
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	.set	noreorder			\n"
			"1:"	__LL	"%[tmp], %[buddy]		\n"
			"	bnez	%[tmp], 2f			\n"
			"	 or	%[tmp], %[tmp], %[global]	\n"
				__SC	"%[tmp], %[buddy]		\n"
			"	beqz	%[tmp], 1b			\n"
			"	nop					\n"
			"2:						\n"
			"	.set	pop				\n"
			: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
			: [global] "r" (page_global));
		}
#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
	}
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
#endif

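/*
 * set_pte_at() also keeps the data cache coherent with the new
 * mapping: __update_cache() deals with potential dcache aliases, but
 * can be skipped when the new PTE is not present or when the pfn is
 * unchanged, since no new data becomes visible through the mapping in
 * those cases.
 */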
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	extern void __update_cache(unsigned long address, pte_t pte);

	if (!pte_present(pteval))
		goto cache_sync_done;

	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
		goto cache_sync_done;

	__update_cache(addr, pteval);
cache_sync_done:
	set_pte(ptep, pteval);
}

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
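
/*
 * __builtin_ffs() returns the 1-based index of the least significant
 * set bit, so for these power-of-two sized types it yields log2 of
 * the size: e.g. if sizeof(pte_t) == 8, PTE_T_LOG2 is
 * __builtin_ffs(8) - 1 == 3.
 */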

/*
 * We used to declare this array with size, but gcc 3.3 and older cannot
 * determine that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
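
/*
 * MIPS has no hardware accessed/dirty bits; they are emulated in
 * software. _PAGE_SILENT_READ shadows the hardware valid bit and
 * _PAGE_SILENT_WRITE the hardware dirty (write-enable) bit, so
 * clearing them in pte_mkold()/pte_mkclean() forces a TLB invalid or
 * TLB modified exception on the next access, which is where the young
 * and dirty state gets set again.
 */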
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_WRITE;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_MODIFIED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_ACCESSED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_READ;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (!(pte.pte_low & _PAGE_NO_READ)) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#endif
static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * Macro to mark a page protection value as "uncacheable". Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}
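
/*
 * Typical (illustrative) use in a driver's mmap() implementation,
 * mapping MMIO registers uncached into userspace:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */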

#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
#endif
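
/*
 * pte_modify() is the mprotect() building block: it keeps the page
 * frame number and the software accessed/dirty state of the old PTE
 * and takes the remaining protection bits from the new pgprot.
 */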

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_tlb(vma, address, pte);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}
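
/*
 * The generic MM code calls update_mmu_cache() after installing a PTE
 * (e.g. on a page fault); since the MIPS TLB is software managed,
 * this is where the new translation gets written into the TLB via
 * __update_tlb().
 */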

#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_PHYS_ADDR_T_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#endif
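
/*
 * fixup_bigphys_addr() is a platform-provided hook (e.g. on Alchemy
 * parts) that rewrites a 32-bit resource address into the device's
 * full physical address before the range is mapped.
 */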

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits; generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version of pmdp_huge_get_and_clear uses a version of
 * pmd_clear() with a different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#include <asm-generic/pgtable.h>

/*
 * uncached accelerated TLB map for video memory access
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
#endif

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
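
/*
 * Both are implemented in arch/mips/mm/mmap.c: shared mappings get
 * aligned to shm_align_mask so that every user mapping of a page uses
 * the same dcache color.
 */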

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */
692