/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
#include <asm/cpu-features.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_SHARED	vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED)

#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
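
/*
 * Illustrative sketch (not part of this header): these protections are
 * what generic mapping helpers consume. For instance, a vmalloc-space
 * mapping of an array of pages could use the default-cached PAGE_KERNEL;
 * "pages" and "npages" here are hypothetical.
 *
 *	void *va = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
 *	if (va) {
 *		... use the cacheable mapping ...
 *		vunmap(va);
 *	}
 *
 * PAGE_KERNEL_UNCACHED would be passed instead when the backing memory
 * is shared with a non-coherent device.
 */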

/*
 * If _PAGE_NO_EXEC is not defined, we can't enforce page protection for
 * execute, so execute permission is treated the same as read. Also, write
 * permissions imply read permissions. This is the closest we can get
 * by reasonable means.
 */

extern unsigned long _page_cachable_default;
extern void __update_cache(unsigned long address, pte_t pte);

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
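
/*
 * Illustrative sketch (not part of this header): because MIPS data
 * caches can be virtually indexed, several cache-colored copies of the
 * zero page exist, and zero_page_mask selects the copy whose color
 * matches the faulting user virtual address, e.g.:
 *
 *	struct page *zp = ZERO_PAGE(fault_address);
 *
 * The "fault_address" variable is hypothetical here; the point is only
 * that the virtual address picks the cache color.
 */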

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

#ifndef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#define htw_stop()							\
do {									\
	unsigned long __flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(__flags);				\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(__flags);				\
	}								\
} while (0)

#define htw_start()							\
do {									\
	unsigned long __flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(__flags);				\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(__flags);				\
	}								\
} while (0)
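
/*
 * Illustrative sketch (not part of this header): htw_stop()/htw_start()
 * reference-count the hardware page table walker via htw_seq, so calls
 * must be balanced and may nest; the walker is only re-enabled when the
 * count drops back to zero. A page table update would typically look
 * like:
 *
 *	htw_stop();
 *	... modify the page table entries ...
 *	htw_start();
 *
 * pte_clear() below follows exactly this pattern.
 */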

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#ifdef CONFIG_XPA
# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#else
# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#endif

#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
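	/*
	 * Write pte_high first: _PAGE_PRESENT lives in pte_low, so the
	 * barrier below ensures the high word is visible before the
	 * entry can be observed as present.
	 */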
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

#ifdef CONFIG_XPA
	if (pte.pte_high & _PAGE_GLOBAL) {
#else
	if (pte.pte_low & _PAGE_GLOBAL) {
#endif
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it had better already be global).
		 */
		if (pte_none(*buddy)) {
			if (!IS_ENABLED(CONFIG_XPA))
				buddy->pte_low |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (IS_ENABLED(CONFIG_XPA)) {
		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
			null.pte_high = _PAGE_GLOBAL;
	} else {
		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
			null.pte_low = null.pte_high = _PAGE_GLOBAL;
	}

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3K_TLB)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it had better already be global).
		 */
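		/*
		 * cmpxchg() installs _PAGE_GLOBAL only while the buddy
		 * is still zero, so a concurrent update to the buddy
		 * entry is never clobbered.
		 */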
# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
		cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
# else
		cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
# endif
	}
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3K_TLB)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
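	/*
	 * Only synchronize caches when installing a present translation
	 * for a new pfn; clearing an entry or rewriting the same page's
	 * protections leaves the cache state valid.
	 */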
	if (!pte_present(pteval))
		goto cache_sync_done;

	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
		goto cache_sync_done;

	__update_cache(addr, pteval);
cache_sync_done:
	set_pte(ptep, pteval);
}

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
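
/*
 * A worked example of the log2 trick above: entry sizes are powers of
 * two, and __builtin_ffs() returns the 1-based index of the least
 * significant set bit. With an 8-byte pte_t, __builtin_ffs(8) == 4,
 * hence PTE_T_LOG2 == 3 and (1 << PTE_T_LOG2) == sizeof(pte_t).
 */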

/*
 * We used to declare this array with its size, but gcc 3.3 and older
 * cannot determine that the expression is a constant, so the size is
 * dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * Platform specific pte_special() and pte_mkspecial() definitions
 * are required only when ARCH_HAS_PTE_SPECIAL is enabled.
 */
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_special(pte_t pte)
{
	return pte.pte_low & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte.pte_low |= _PAGE_SPECIAL;
	return pte;
}
#else
static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}
#endif
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_WRITE;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_MODIFIED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_ACCESSED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_READ;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (!(pte.pte_low & _PAGE_NO_READ)) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

#define pte_sw_mkyoung	pte_mkyoung

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_val(pmd) & _PAGE_HUGE)
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_SOFT_DIRTY);
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#endif
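
/*
 * Illustrative sketch (not part of this header): the accessors above
 * compose, and the _PAGE_SILENT_* (hardware-visible valid/dirty) bits
 * are kept consistent automatically. For example, a write-fault path
 * marking an entry accessed and dirty could do:
 *
 *	pte_t pte = *ptep;
 *
 *	pte = pte_mkdirty(pte_mkyoung(pte));
 *	set_pte_at(mm, addr, ptep, pte);
 *
 * where "mm", "addr" and "ptep" are whatever the caller already holds.
 */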

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here, as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}
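
/*
 * Illustrative sketch (not part of this header): a driver's mmap()
 * handler would typically apply one of these helpers before mapping
 * device memory; "mydev_mmap" and "pfn" (the device page frame) are
 * hypothetical.
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start, pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 *
 * pgprot_writecombine() would be chosen instead for framebuffer-style
 * memory where gathered writes are acceptable.
 */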

static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address,
						pte_t *ptep)
{
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	if ((pte_val(pte) & _PAGE_ACCESSED) && !(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
#endif
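
/*
 * Illustrative sketch (not part of this header): pte_modify() keeps the
 * pfn and the bits in _PAGE_CHG_MASK while swapping in a new protection,
 * which is how mprotect()-style code retargets an existing entry, e.g.:
 *
 *	pte_t pte = *ptep;
 *
 *	pte = pte_modify(pte, vm_get_page_prot(new_vm_flags));
 *	set_pte_at(mm, addr, ptep, pte);
 *
 * "new_vm_flags" stands in for the VMA's updated VM_READ/VM_WRITE/...
 * flags; the generic mprotect path works along these lines.
 */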

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte.pte_low & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte.pte_low |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte.pte_low &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}
#else
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}
#endif

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_tlb(vma, address, pte);
}

#define	__HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb	update_mmu_cache

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}
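
/*
 * Illustrative sketch (not part of this header): MIPS TLB refill is
 * driven from the page tables by software, so after a fault-path PTE
 * update the canonical sequence is to install the entry and then prime
 * the TLB:
 *
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 *	update_mmu_cache(vma, address, ptep);
 *
 * which replays the new translation into the TLB via __update_tlb().
 */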

/*
 * Allow physical addresses to be fixed up to help 36-bit peripherals.
 */
#ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size);
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
		unsigned long pfn, unsigned long size, pgprot_t prot);
#define io_remap_pfn_range io_remap_pfn_range
#else
#define fixup_bigphys_addr(addr, size)	(addr)
#endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits; generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pmd_soft_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_SOFT_DIRTY);
	return pmd;
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic pmdp_huge_get_and_clear() uses a version of pmd_clear()
 * with a different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef _PAGE_HUGE
#define pmd_leaf(pmd)	((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)	((pud_val(pud) & _PAGE_HUGE) != 0)
#endif

#define gup_fast_permitted(start, end)	(!cpu_has_dc_aliases)

/*
 * We provide our own get_unmapped_area() to cope with the virtual
 * aliasing constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_PGTABLE_H */