/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_PGTABLE_H
#define _PARISC_PGTABLE_H

#include <asm/page.h>

#if CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#elif CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#endif

#include <asm/fixmap.h>

#ifndef __ASSEMBLY__
/*
 * we simulate an x86-style page table for the linux mm code
 */

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/processor.h>
#include <asm/cache.h>

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for parisc.
 */
#define kern_addr_valid(addr)	(1)

/* This is for the serialization of PxTLB broadcasts. At least on the N-class
 * systems, only one PxTLB inter-processor broadcast can be active at any one
 * time on the Merced bus. */
extern spinlock_t pa_tlb_flush_lock;
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
extern int pa_serialize_tlb_flushes;
#else
#define pa_serialize_tlb_flushes        (0)
#endif

#define purge_tlb_start(flags)  do { \
	if (pa_serialize_tlb_flushes)	\
		spin_lock_irqsave(&pa_tlb_flush_lock, flags); \
	else \
		local_irq_save(flags);	\
	} while (0)
#define purge_tlb_end(flags)	do { \
	if (pa_serialize_tlb_flushes)	\
		spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \
	else \
		local_irq_restore(flags); \
	} while (0)

/* Purge data and instruction TLB entries. The TLB purge instructions
 * are slow on SMP machines since the purge must be broadcast to all CPUs.
 */

static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;

	purge_tlb_start(flags);
	mtsp(mm->context, 1);
	pdtlb(addr);
	pitlb(addr);
	purge_tlb_end(flags);
}

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)			\
	do {					\
		*(pteptr) = (pteval);		\
		barrier();			\
	} while (0)

#define set_pte_at(mm, addr, pteptr, pteval)	\
	do {					\
		*(pteptr) = (pteval);		\
		purge_tlb_entries(mm, addr);	\
	} while (0)

#endif /* !__ASSEMBLY__ */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#if CONFIG_PGTABLE_LEVELS == 3
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))

/* This is the size of the initially mapped kernel memory */
#if defined(CONFIG_64BIT)
#define KERNEL_INITIAL_ORDER	26	/* 1<<26 = 64MB */
#else
#define KERNEL_INITIAL_ORDER	25	/* 1<<25 = 32MB */
#endif
#define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)

#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_ORDER	1
#define PGD_ORDER	0
#else
#define PGD_ORDER	1
#endif

/* Definitions for the 3rd level (we use PLD here for Page Lower directory
 * because PTE_SHIFT is used lower down to mean the shift that has to be
 * done to get usable bits out of the PTE) */
#define PLD_SHIFT	PAGE_SHIFT
#define PLD_SIZE	PAGE_SIZE
#define BITS_PER_PTE	(PAGE_SHIFT - BITS_PER_PTE_ENTRY)
#define PTRS_PER_PTE    (1UL << BITS_PER_PTE)
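
/* Worked example, assuming a 64-bit kernel with 4 kB pages, i.e.
 * PAGE_SHIFT == 12 and BITS_PER_PTE_ENTRY == 3 (8-byte PTEs; both
 * values come from <asm/page.h>):
 *   BITS_PER_PTE == 12 - 3 == 9
 *   PTRS_PER_PTE == 1 << 9 == 512 PTEs per page-table page
 */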

/* Definitions for 2nd level */
#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT       (PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define BITS_PER_PMD	(PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
#define PTRS_PER_PMD    (1UL << BITS_PER_PMD)
#else
#define BITS_PER_PMD	0
#endif
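
/* Continuing the 64-bit/4 kB example (BITS_PER_PMD_ENTRY == 2, i.e.
 * 4-byte pmd entries, again from <asm/page.h>):
 *   PMD_SHIFT    == 12 + 9 == 21, so PMD_SIZE == 2 MB per pmd entry
 *   BITS_PER_PMD == 12 + 1 - 2 == 11, so PTRS_PER_PMD == 2048 entries
 *   (the pmd spans two pages, since PMD_ORDER == 1)
 */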

/* Definitions for 1st level */
#define PGDIR_SHIFT	(PLD_SHIFT + BITS_PER_PTE + BITS_PER_PMD)
#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
#define BITS_PER_PGD	(BITS_PER_LONG - PGDIR_SHIFT)
#else
#define BITS_PER_PGD	(PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD    (1UL << BITS_PER_PGD)
#define USER_PTRS_PER_PGD       PTRS_PER_PGD

#ifdef CONFIG_64BIT
#define MAX_ADDRBITS	(PGDIR_SHIFT + BITS_PER_PGD)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT	(MAX_ADDRBITS - 32)
#else
#define MAX_ADDRBITS	(BITS_PER_LONG)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT	0
#endif
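
/* Finishing the 64-bit/4 kB example (BITS_PER_PGD_ENTRY == 2, from
 * <asm/page.h>):
 *   PGDIR_SHIFT  == 12 + 9 + 11 == 32, so each pgd entry maps 4 GB
 *   BITS_PER_PGD == 12 + 0 - 2 == 10, so PTRS_PER_PGD == 1024
 *   MAX_ADDRBITS == 32 + 10 == 42, i.e. MAX_ADDRESS == 4 TB,
 *   with SPACEID_SHIFT == 10
 */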

/* This calculates the number of initial pages we need for the initial
 * page tables */
#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
#else
# define PT_INITIAL	(1)  /* all initial PTEs fit into one page */
#endif
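
/* Worked example: a 64-bit kernel maps KERNEL_INITIAL_SIZE == 64 MB
 * initially; with the 4 kB-page values above (PMD_SHIFT == 21, i.e.
 * each PTE page covers 2 MB), PT_INITIAL == 1 << (26 - 21) == 32
 * initial page-table pages.
 */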

/*
 * pgd entries used up by user/kernel:
 */

/* NB: The tlb miss handlers make certain assumptions about the order */
/*     of the following bits, so be careful (One example, bits 25-31  */
/*     are moved together in one instruction).                        */

#define _PAGE_READ_BIT     31   /* (0x001) read access allowed */
#define _PAGE_WRITE_BIT    30   /* (0x002) write access allowed */
#define _PAGE_EXEC_BIT     29   /* (0x004) execute access allowed */
#define _PAGE_GATEWAY_BIT  28   /* (0x008) privilege promotion allowed */
#define _PAGE_DMB_BIT      27   /* (0x010) Data Memory Break enable (B bit) */
#define _PAGE_DIRTY_BIT    26   /* (0x020) Page Dirty (D bit) */
#define _PAGE_REFTRAP_BIT  25   /* (0x040) Page Ref. Trap enable (T bit) */
#define _PAGE_NO_CACHE_BIT 24   /* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23   /* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT  22   /* (0x200) Software: translation valid */
#define _PAGE_HPAGE_BIT    21   /* (0x400) Software: Huge Page */
#define _PAGE_USER_BIT     20   /* (0x800) Software: User accessible page */

/* N.B. The bits are defined in terms of a 32-bit word above, so the */
/*      following macro is ok for both 32 and 64 bit.                */

#define xlate_pabit(x) (31 - (x))

/* This defines the shift to the usable bits in the PTE; it is set so
 * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set
 * to zero */
#define PTE_SHIFT		xlate_pabit(_PAGE_USER_BIT)
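
/* Worked example: _PAGE_READ_BIT is PA-RISC bit 31 (the least
 * significant bit in big-endian bit numbering), so
 *   xlate_pabit(_PAGE_READ_BIT) == 31 - 31 == 0
 *   _PAGE_READ == 1 << 0 == 0x001, matching the hex noted above;
 *   PTE_SHIFT  == xlate_pabit(20) == 11.
 */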

/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
#define PFN_PTE_SHIFT		12

#define _PAGE_READ     (1 << xlate_pabit(_PAGE_READ_BIT))
#define _PAGE_WRITE    (1 << xlate_pabit(_PAGE_WRITE_BIT))
#define _PAGE_RW       (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_EXEC     (1 << xlate_pabit(_PAGE_EXEC_BIT))
#define _PAGE_GATEWAY  (1 << xlate_pabit(_PAGE_GATEWAY_BIT))
#define _PAGE_DMB      (1 << xlate_pabit(_PAGE_DMB_BIT))
#define _PAGE_DIRTY    (1 << xlate_pabit(_PAGE_DIRTY_BIT))
#define _PAGE_REFTRAP  (1 << xlate_pabit(_PAGE_REFTRAP_BIT))
#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT  (1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_HUGE     (1 << xlate_pabit(_PAGE_HPAGE_BIT))
#define _PAGE_USER     (1 << xlate_pabit(_PAGE_USER_BIT))

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO	(_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL_EXEC	(_PAGE_KERNEL_RO | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX	(_PAGE_KERNEL_EXEC | _PAGE_WRITE)
#define _PAGE_KERNEL		(_PAGE_KERNEL_RO | _PAGE_WRITE)

/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
 * are page-aligned, we don't care about the PAGE_OFFSET bits, except
 * for a few meta-information bits, so we shift the address to be
 * able to effectively address 40/42/44-bits of physical address space
 * depending on 4k/16k/64k PAGE_SIZE */
#define _PxD_PRESENT_BIT   31
#define _PxD_VALID_BIT     30

#define PxD_FLAG_PRESENT  (1 << xlate_pabit(_PxD_PRESENT_BIT))
#define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
#define PxD_FLAG_MASK     (0xf)
#define PxD_FLAG_SHIFT    (4)
#define PxD_VALUE_SHIFT   (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
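
/* Sketch of the encoding with the values above (PxD_VALUE_SHIFT ==
 * 12 - 4 == 8): a pmd/pgd entry holds (phys_addr_of_next_table >> 8)
 * together with the flag bits in its low nibble, and pmd_address()
 * below recovers the address as (pmd_val(x) & ~PxD_FLAG_MASK) << 8.
 * A 32-bit entry value can therefore point anywhere within
 * 32 + 8 == 40 bits of physical address space.
 */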

#ifndef __ASSEMBLY__

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE)
/* Others seem to make this executable; I don't know if that's correct
   or not.  The stack is mapped this way though so this is necessary
   in the short term - dhd@linuxcare.com, 2000-08-08 */
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ)
#define PAGE_WRITEONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE)
#define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC)
#define PAGE_COPY       PAGE_EXECREAD
#define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RWX	__pgprot(_PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY | _PAGE_READ)

/*
 * We could have an execute only page using "gateway - promote to priv
 * level 3", but that is kind of silly. So, the way things are defined
 * now, we must always have read permission for pages with execute
 * permission. For the fun of it we'll go ahead and support write only
 * pages.
 */

	 /*xwr*/
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  __P000 /* copy on write */
#define __P011  __P001 /* copy on write */
#define __P100  PAGE_EXECREAD
#define __P101  PAGE_EXECREAD
#define __P110  __P100 /* copy on write */
#define __P111  __P101 /* copy on write */

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_WRITEONLY
#define __S011  PAGE_SHARED
#define __S100  PAGE_EXECREAD
#define __S101  PAGE_EXECREAD
#define __S110  PAGE_RWX
#define __S111  PAGE_RWX
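
/* How these maps are consumed (a general-mm sketch, not parisc-specific):
 * mmap(PROT_WRITE, MAP_PRIVATE) selects __P010 == PAGE_NONE, so the
 * first touch faults and the store is resolved in the fault path
 * (copy-on-write for file-backed pages); mmap(PROT_WRITE, MAP_SHARED)
 * selects __S010 == PAGE_WRITEONLY and the store can proceed directly.
 */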

extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */

/* initial page tables for 0-8MB for kernel */

extern pte_t pg0[];

/* zero page used for uninitialized stuff */

extern unsigned long *empty_zero_page;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#define pte_none(x)     (pte_val(x) == 0)
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, xp)  set_pte_at(mm, addr, xp, __pte(0))

#define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x)	((unsigned long)(pmd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pud_flag(x)	(pud_val(x) & PxD_FLAG_MASK)
#define pud_address(x)	((unsigned long)(pud_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x)	((unsigned long)(pgd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)

#define pmd_none(x)	(!pmd_val(x))
#define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
#define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd)
{
	set_pmd(pmd, __pmd(0));
}

#if CONFIG_PGTABLE_LEVELS == 3
#define pud_page_vaddr(pud) ((unsigned long) __va(pud_address(pud)))
#define pud_page(pud)	virt_to_page((void *)pud_page_vaddr(pud))

/* For 64 bit we have three level tables */

#define pud_none(x)     (!pud_val(x))
#define pud_bad(x)      (!(pud_flag(x) & PxD_FLAG_VALID))
#define pud_present(x)  (pud_flag(x) & PxD_FLAG_PRESENT)
static inline void pud_clear(pud_t *pud)
{
	set_pud(pud, __pud(0));
}
#endif

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_WRITE; }

static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return pte; }

/*
 * Huge pte definitions.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_huge(pte)           (pte_val(pte) & _PAGE_HUGE)
#define pte_mkhuge(pte)         (__pte(pte_val(pte) | \
				 (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
#else
#define pte_huge(pte)           (0)
#define pte_mkhuge(pte)         (pte)
#endif

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define __mk_pte(addr,pgprot) \
({									\
	pte_t __pte;							\
									\
	pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot));	\
									\
	__pte;								\
})

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
	return pte;
}
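
/* Worked example (hypothetical values): pfn_pte(0x1234, PAGE_KERNEL)
 * builds a PTE whose value is (0x1234 << PFN_PTE_SHIFT) | _PAGE_KERNEL,
 * and pte_pfn() below recovers 0x1234 by shifting the value right by
 * PFN_PTE_SHIFT again.
 */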

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

/* Permanent address of a page.  On parisc we don't have highmem. */

#define pte_pfn(x)		(pte_val(x) >> PFN_PTE_SHIFT)

#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return ((unsigned long) __va(pmd_address(pmd)));
}

#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
#define pmd_page(pmd)	virt_to_page((void *)__pmd_page(pmd))

/* Find an entry in the second-level page table.. */

extern void paging_init (void);

/* Used for deferring calls to flush_dcache_page() */

#define PG_dcache_dirty         PG_arch_1

extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

/* Encode and de-code a swap entry */

#define __swp_type(x)                     ((x).val & 0x1f)
#define __swp_offset(x)                   ( (((x).val >> 6) &  0x7) | \
					  (((x).val >> 8) & ~0x7) )
#define __swp_entry(type, offset)         ((swp_entry_t) { (type) | \
					    (((offset) &  0x7) << 6) | \
					    (((offset) & ~0x7) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
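
/* Round-trip sketch: __swp_entry(3, 0x10) yields val == 3 | (0x10 << 8)
 * == 0x1003; __swp_type() then recovers 3 and __swp_offset() recovers
 * 0x10.  Bit 5 and bits 9-10 are left out of the encoding so that flag
 * bits such as _PAGE_PRESENT (bit 9 after xlate_pabit) stay clear in a
 * swapped-out PTE.
 */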

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	if (!pte_young(*ptep))
		return 0;

	pte = *ptep;
	if (!pte_young(pte))
		return 0;

	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
}

struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t old_pte;

	old_pte = *ptep;
	set_pte_at(mm, addr, ptep, __pte(0));

	return old_pte;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
}

#define pte_same(A,B)	(pte_val(A) == pte_val(B))

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

#endif /* !__ASSEMBLY__ */

/* TLB page size encoding - see table 3-1 in parisc20.pdf */
#define _PAGE_SIZE_ENCODING_4K		0
#define _PAGE_SIZE_ENCODING_16K		1
#define _PAGE_SIZE_ENCODING_64K		2
#define _PAGE_SIZE_ENCODING_256K	3
#define _PAGE_SIZE_ENCODING_1M		4
#define _PAGE_SIZE_ENCODING_4M		5
#define _PAGE_SIZE_ENCODING_16M		6
#define _PAGE_SIZE_ENCODING_64M		7

#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
#endif

#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)

/* We provide our own get_unmapped_area to provide cache coherency */

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME

#endif /* _PARISC_PGTABLE_H */