xref: /openbmc/linux/arch/parisc/include/asm/pgtable.h (revision ecfb9f40)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_PGTABLE_H
#define _PARISC_PGTABLE_H

#include <asm/page.h>

#if CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#elif CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#endif

#include <asm/fixmap.h>

#ifndef __ASSEMBLY__
/*
 * We simulate an x86-style page table for the Linux mm code.
 */

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/processor.h>
#include <asm/cache.h>

/* This is for the serialization of PxTLB broadcasts. At least on N-class
 * systems, only one PxTLB inter-processor broadcast can be active at any
 * one time on the Merced bus. */
extern spinlock_t pa_tlb_flush_lock;
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
extern int pa_serialize_tlb_flushes;
#else
#define pa_serialize_tlb_flushes        (0)
#endif

#define purge_tlb_start(flags)  do { \
	if (pa_serialize_tlb_flushes)	\
		spin_lock_irqsave(&pa_tlb_flush_lock, flags); \
	else \
		local_irq_save(flags);	\
	} while (0)
#define purge_tlb_end(flags)	do { \
	if (pa_serialize_tlb_flushes)	\
		spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \
	else \
		local_irq_restore(flags); \
	} while (0)

/* Purge data and instruction TLB entries. The TLB purge instructions
 * are slow on SMP machines, since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;

	purge_tlb_start(flags);
	mtsp(mm->context.space_id, SR_TEMP1);
	pdtlb(SR_TEMP1, addr);
	pitlb(SR_TEMP1, addr);
	purge_tlb_end(flags);
}

extern void __update_cache(pte_t pte);

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)			\
	do {					\
		*(pteptr) = (pteval);		\
		mb();				\
	} while (0)

#define set_pte_at(mm, addr, pteptr, pteval)	\
	do {					\
		if (pte_present(pteval) &&	\
		    pte_user(pteval))		\
			__update_cache(pteval);	\
		*(pteptr) = (pteval);		\
		purge_tlb_entries(mm, addr);	\
	} while (0)
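
/*
 * A minimal usage sketch (hypothetical call site, not from this file):
 * installing a present user PTE first lets __update_cache() flush any
 * dcache alias for the new translation, then stores the PTE and purges
 * any stale TLB entry for that address.
 *
 *	pte_t pte = mk_pte(page, PAGE_SHARED);	// page: a struct page *
 *	set_pte_at(vma->vm_mm, addr, ptep, pte);
 */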

#endif /* !__ASSEMBLY__ */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#if CONFIG_PGTABLE_LEVELS == 3
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))

/* This is the size of the initially mapped kernel memory. */
#if defined(CONFIG_64BIT)
#define KERNEL_INITIAL_ORDER	26	/* 1<<26 = 64MB */
#else
#define KERNEL_INITIAL_ORDER	25	/* 1<<25 = 32MB */
#endif
#define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)

#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_TABLE_ORDER	1
#define PGD_TABLE_ORDER	0
#else
#define PGD_TABLE_ORDER	1
#endif

/* Definitions for the 3rd level. We use "PLD" here for Page Lower
 * Directory, because PTE_SHIFT is used further down to mean the shift
 * needed to get the usable bits out of a PTE. */
#define PLD_SHIFT	PAGE_SHIFT
#define PLD_SIZE	PAGE_SIZE
#define BITS_PER_PTE	(PAGE_SHIFT - BITS_PER_PTE_ENTRY)
#define PTRS_PER_PTE    (1UL << BITS_PER_PTE)

/* Definitions for the 2nd level */
#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT       (PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define BITS_PER_PMD	(PAGE_SHIFT + PMD_TABLE_ORDER - BITS_PER_PMD_ENTRY)
#define PTRS_PER_PMD    (1UL << BITS_PER_PMD)
#else
#define BITS_PER_PMD	0
#endif

/* Definitions for the 1st level */
#define PGDIR_SHIFT	(PLD_SHIFT + BITS_PER_PTE + BITS_PER_PMD)
#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_TABLE_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
#define BITS_PER_PGD	(BITS_PER_LONG - PGDIR_SHIFT)
#else
#define BITS_PER_PGD	(PAGE_SHIFT + PGD_TABLE_ORDER - BITS_PER_PGD_ENTRY)
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD    (1UL << BITS_PER_PGD)
#define USER_PTRS_PER_PGD       PTRS_PER_PGD

#ifdef CONFIG_64BIT
#define MAX_ADDRBITS	(PGDIR_SHIFT + BITS_PER_PGD)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT	(MAX_ADDRBITS - 32)
#else
#define MAX_ADDRBITS	(BITS_PER_LONG)
#define MAX_ADDRESS	(1ULL << MAX_ADDRBITS)
#define SPACEID_SHIFT	0
#endif

/* This calculates the number of pages we need for the initial
 * page tables. */
#if (KERNEL_INITIAL_ORDER) >= (PLD_SHIFT + BITS_PER_PTE)
# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PLD_SHIFT - BITS_PER_PTE))
#else
# define PT_INITIAL	(1)  /* all initial PTEs fit into one page */
#endif
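
/*
 * A worked sizing example (not normative; the *_ENTRY constants live in
 * asm/page.h). Assuming CONFIG_64BIT, CONFIG_PGTABLE_LEVELS == 3, 4K
 * pages (PAGE_SHIFT == 12) and 8-byte table entries (BITS_PER_PTE_ENTRY
 * == BITS_PER_PMD_ENTRY == BITS_PER_PGD_ENTRY == 3):
 *
 *	BITS_PER_PTE = 12 - 3 = 9		-> PTRS_PER_PTE = 512
 *	PMD_SHIFT    = 12 + 9 = 21		-> PMD_SIZE = 2 MB
 *	BITS_PER_PMD = 12 + 1 - 3 = 10		-> PTRS_PER_PMD = 1024
 *	PGDIR_SHIFT  = 12 + 9 + 10 = 31		-> PGDIR_SIZE = 2 GB
 *	BITS_PER_PGD = 12 + 0 - 3 = 9		-> PTRS_PER_PGD = 512
 *	MAX_ADDRBITS = 31 + 9 = 40, SPACEID_SHIFT = 8
 *	PT_INITIAL   = 1 << (26 - 12 - 9) = 32 initial PTE pages
 */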

/*
 * pgd entries used up by user/kernel:
 */

/* NB: The TLB miss handlers make certain assumptions about the order  */
/*     of the following bits, so be careful. (One example: bits 25-31  */
/*     are moved together in one instruction.)                         */

#define _PAGE_READ_BIT     31   /* (0x001) read access allowed */
#define _PAGE_WRITE_BIT    30   /* (0x002) write access allowed */
#define _PAGE_EXEC_BIT     29   /* (0x004) execute access allowed */
#define _PAGE_GATEWAY_BIT  28   /* (0x008) privilege promotion allowed */
#define _PAGE_DMB_BIT      27   /* (0x010) Data Memory Break enable (B bit) */
#define _PAGE_DIRTY_BIT    26   /* (0x020) Page Dirty (D bit) */
#define _PAGE_REFTRAP_BIT  25   /* (0x040) Page Ref. Trap enable (T bit) */
#define _PAGE_NO_CACHE_BIT 24   /* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23   /* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT  22   /* (0x200) Software: translation valid */
#define _PAGE_HPAGE_BIT    21   /* (0x400) Software: Huge Page */
#define _PAGE_USER_BIT     20   /* (0x800) Software: User accessible page */
#ifdef CONFIG_HUGETLB_PAGE
#define _PAGE_SPECIAL_BIT  _PAGE_DMB_BIT  /* DMB feature is currently unused */
#else
#define _PAGE_SPECIAL_BIT  _PAGE_HPAGE_BIT /* use the unused HUGE PAGE bit */
#endif

/* N.B. The bits are defined in terms of a 32-bit word above, so the */
/*      following macro is ok for both 32 and 64 bit.                */

#define xlate_pabit(x) (31 - (x))

/* This defines the shift to the usable bits in the PTE; it is set so
 * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set
 * to zero. */
#define PTE_SHIFT	   	xlate_pabit(_PAGE_USER_BIT)
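
/*
 * Worked example: PA-RISC numbers PTE bits from the MSB down (bit 0 is
 * the most significant), so xlate_pabit() converts a PA bit number into
 * a conventional left-shift count within a 32-bit word:
 *
 *	_PAGE_READ = 1 << xlate_pabit(31) = 1 << 0  = 0x001
 *	_PAGE_USER = 1 << xlate_pabit(20) = 1 << 11 = 0x800
 *	PTE_SHIFT  = xlate_pabit(20) = 11
 */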

/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
#define PFN_PTE_SHIFT		12

#define _PAGE_READ     (1 << xlate_pabit(_PAGE_READ_BIT))
#define _PAGE_WRITE    (1 << xlate_pabit(_PAGE_WRITE_BIT))
#define _PAGE_RW       (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_EXEC     (1 << xlate_pabit(_PAGE_EXEC_BIT))
#define _PAGE_GATEWAY  (1 << xlate_pabit(_PAGE_GATEWAY_BIT))
#define _PAGE_DMB      (1 << xlate_pabit(_PAGE_DMB_BIT))
#define _PAGE_DIRTY    (1 << xlate_pabit(_PAGE_DIRTY_BIT))
#define _PAGE_REFTRAP  (1 << xlate_pabit(_PAGE_REFTRAP_BIT))
#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT  (1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_HUGE     (1 << xlate_pabit(_PAGE_HPAGE_BIT))
#define _PAGE_USER     (1 << xlate_pabit(_PAGE_USER_BIT))
#define _PAGE_SPECIAL  (1 << xlate_pabit(_PAGE_SPECIAL_BIT))

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
#define _PAGE_KERNEL_RO	(_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL_EXEC	(_PAGE_KERNEL_RO | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX	(_PAGE_KERNEL_EXEC | _PAGE_WRITE)
#define _PAGE_KERNEL		(_PAGE_KERNEL_RO | _PAGE_WRITE)

/* The pgd/pmd contains a pointer (in physical address space); since all
 * pgds/pmds are page-aligned, we don't care about the low-order page
 * offset bits, except for a few meta-information bits. So we shift the
 * address to be able to effectively address 40/42/44 bits of physical
 * address space, depending on 4k/16k/64k PAGE_SIZE. */
#define _PxD_PRESENT_BIT   31
#define _PxD_VALID_BIT     30

#define PxD_FLAG_PRESENT  (1 << xlate_pabit(_PxD_PRESENT_BIT))
#define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
#define PxD_FLAG_MASK     (0xf)
#define PxD_FLAG_SHIFT    (4)
#define PxD_VALUE_SHIFT   (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
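
/*
 * A worked encoding example (illustrative, assuming 4K pages): a pgd/pmd
 * entry stores a page-aligned physical table address shifted right by
 * PxD_VALUE_SHIFT (12 - 4 = 8), with the freed low PxD_FLAG_MASK bits
 * reused for flags. For a table at physical address 0x12345000:
 *
 *	entry   = (0x12345000 >> 8) | PxD_FLAG_PRESENT | PxD_FLAG_VALID
 *	address = (entry & ~PxD_FLAG_MASK) << 8 = 0x12345000
 *
 * With a 32-bit entry this addresses up to 32 + 8 = 40 bits of physical
 * memory, matching the comment above.
 */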

#ifndef __ASSEMBLY__

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE)
/* Others seem to make this executable, but I don't know if that's
   correct or not. The stack is mapped this way, though, so this is
   necessary in the short term - dhd@linuxcare.com, 2000-08-08 */
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ)
#define PAGE_WRITEONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE)
#define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC)
#define PAGE_COPY       PAGE_EXECREAD
#define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RWX	__pgprot(_PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY | _PAGE_READ)

/*
 * We could have an execute-only page using "gateway - promote to priv
 * level 3", but that is kind of silly. So, the way things are defined
 * now, we must always have read permission for pages with execute
 * permission. For the fun of it we'll go ahead and support write-only
 * pages.
 */

extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */

/* initial page tables for 0-8MB for kernel */
extern pte_t pg0[];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#define pte_none(x)     (pte_val(x) == 0)
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_user(x)	(pte_val(x) & _PAGE_USER)
#define pte_clear(mm, addr, xp)  set_pte_at(mm, addr, xp, __pte(0))

#define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x)	((unsigned long)(pmd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pud_flag(x)	(pud_val(x) & PxD_FLAG_MASK)
#define pud_address(x)	((unsigned long)(pud_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x)	((unsigned long)(pgd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)

#define pmd_none(x)	(!pmd_val(x))
#define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
#define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd)
{
	set_pmd(pmd, __pmd(0));
}

#if CONFIG_PGTABLE_LEVELS == 3
#define pud_pgtable(pud) ((pmd_t *) __va(pud_address(pud)))
#define pud_page(pud)	virt_to_page((void *)pud_pgtable(pud))

/* For 64 bit we have three-level tables */

#define pud_none(x)     (!pud_val(x))
#define pud_bad(x)      (!(pud_flag(x) & PxD_FLAG_VALID))
#define pud_present(x)  (pud_flag(x) & PxD_FLAG_PRESENT)
static inline void pud_clear(pud_t *pud)
{
	set_pud(pud, __pud(0));
}
#endif

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }

static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return pte; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }

/*
 * Huge pte definitions.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_huge(pte)           (pte_val(pte) & _PAGE_HUGE)
#define pte_mkhuge(pte)         (__pte(pte_val(pte) | \
				 (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
#else
#define pte_huge(pte)           (0)
#define pte_mkhuge(pte)         (pte)
#endif

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define __mk_pte(addr,pgprot) \
({									\
	pte_t __pte;							\
									\
	pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot));	\
									\
	__pte;								\
})

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
	return pte;
}
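
/*
 * Illustrative round trip (hypothetical values): with PFN_PTE_SHIFT == 12,
 * building a kernel mapping for PFN 0x1234 and reading the PFN back:
 *
 *	pte_t pte = pfn_pte(0x1234, PAGE_KERNEL);
 *	// pte_val(pte) == (0x1234 << 12) | pgprot_val(PAGE_KERNEL)
 *	// pte_pfn(pte) == 0x1234		(see below)
 */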

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}

/* Permanent address of a page.  On parisc we don't have highmem. */

#define pte_pfn(x)		(pte_val(x) >> PFN_PTE_SHIFT)

#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return ((unsigned long) __va(pmd_address(pmd)));
}

#define pmd_pfn(pmd)	(pmd_address(pmd) >> PAGE_SHIFT)
#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
#define pmd_page(pmd)	virt_to_page((void *)__pmd_page(pmd))

extern void paging_init (void);

/* Used for deferring calls to flush_dcache_page() */

#define PG_dcache_dirty         PG_arch_1

#define update_mmu_cache(vms,addr,ptep) __update_cache(*ptep)

/* Encode and decode a swap entry */

#define __swp_type(x)                     ((x).val & 0x1f)
#define __swp_offset(x)                   ( (((x).val >> 6) &  0x7) | \
					  (((x).val >> 8) & ~0x7) )
#define __swp_entry(type, offset)         ((swp_entry_t) { (type) | \
					    ((offset &  0x7) << 6) | \
					    ((offset & ~0x7) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
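
/*
 * A worked example (hypothetical values): five bits of swap type live in
 * bits 0-4, and the offset is split so that bits 5, 9 and 10 stay clear;
 * bit 9 is _PAGE_PRESENT (0x200), so a swap PTE is never seen as present.
 * For __swp_entry(5, 0x1234):
 *
 *	(0x1234 &  0x7) << 6 = 0x000100
 *	(0x1234 & ~0x7) << 8 = 0x123000
 *	entry.val = 0x123000 | 0x100 | 5 = 0x123105
 *
 * Decoding: __swp_type() gives 0x123105 & 0x1f = 5, and __swp_offset()
 * recombines ((val >> 6) & 0x7) | ((val >> 8) & ~0x7) = 0x4 | 0x1230
 * = 0x1234.
 */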

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	/* Cheap test first; then re-test the snapshot we will act on. */
	if (!pte_young(*ptep))
		return 0;

	pte = *ptep;
	if (!pte_young(pte))
		return 0;

	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
}

struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t old_pte;

	old_pte = *ptep;
	set_pte_at(mm, addr, ptep, __pte(0));

	return old_pte;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
}

#define pte_same(A,B)	(pte_val(A) == pte_val(B))

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

#endif /* !__ASSEMBLY__ */

/* TLB page size encoding - see table 3-1 in parisc20.pdf */
#define _PAGE_SIZE_ENCODING_4K		0
#define _PAGE_SIZE_ENCODING_16K		1
#define _PAGE_SIZE_ENCODING_64K		2
#define _PAGE_SIZE_ENCODING_256K	3
#define _PAGE_SIZE_ENCODING_1M		4
#define _PAGE_SIZE_ENCODING_4M		5
#define _PAGE_SIZE_ENCODING_16M		6
#define _PAGE_SIZE_ENCODING_64M		7
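
/* Equivalently, encoding n selects a page size of 4 KB << (2 * n):
 * each step in the encoding quadruples the page size. */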

#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
#endif

#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)

/* We provide our own get_unmapped_area to provide cache coherency */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME

#endif /* _PARISC_PGTABLE_H */