#ifndef _PARISC_PGTABLE_H
#define _PARISC_PGTABLE_H

#include <asm-generic/4level-fixup.h>

#include <asm/fixmap.h>

#ifndef __ASSEMBLY__
/*
 * We simulate an x86-style page table for the Linux mm code.
 */

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/processor.h>
#include <asm/cache.h>

extern spinlock_t pa_dbit_lock;
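
/* pa_dbit_lock serializes the software PTE updates below against the
 * dirty/accessed-bit updates performed by the TLB fault handlers,
 * which take the same lock before rewriting a PTE (see entry.S). */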

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * it may require a hash or multi-level tree lookup or something of
 * that sort), but it guarantees to return TRUE only if accessing the
 * page at that address does not cause an error.  Note that there may
 * be addresses for which kern_addr_valid() returns FALSE even though
 * an access would not cause an error (e.g., this is typically true
 * for memory-mapped I/O regions).
 *
 * XXX Need to implement this for parisc.
 */
#define kern_addr_valid(addr)	(1)

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)					\
	do {							\
		*(pteptr) = (pteval);				\
	} while (0)

extern void purge_tlb_entries(struct mm_struct *, unsigned long);

#define set_pte_at(mm, addr, ptep, pteval)			\
	do {							\
		unsigned long flags;				\
		spin_lock_irqsave(&pa_dbit_lock, flags);	\
		set_pte(ptep, pteval);				\
		purge_tlb_entries(mm, addr);			\
		spin_unlock_irqrestore(&pa_dbit_lock, flags);	\
	} while (0)

#endif /* !__ASSEMBLY__ */

#include <asm/page.h>

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))

/* This is the size of the initially mapped kernel memory */
#define KERNEL_INITIAL_ORDER	24	/* 0 to 1<<24 = 16MB */
#define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)

#if CONFIG_PGTABLE_LEVELS == 3
#define PGD_ORDER	1 /* log2 of the number of pages per pgd */
#define PMD_ORDER	1 /* log2 of the number of pages per pmd */
#define PGD_ALLOC_ORDER	2 /* the first pgd allocation also holds the pmd */
#else
#define PGD_ORDER	1 /* log2 of the number of pages per pgd */
#define PGD_ALLOC_ORDER	PGD_ORDER
#endif

/* Definitions for 3rd level (we use PLD here for Page Lower
 * Directory, because PTE_SHIFT is used further down to mean the shift
 * that has to be done to get usable bits out of the PTE) */
#define PLD_SHIFT	PAGE_SHIFT
#define PLD_SIZE	PAGE_SIZE
#define BITS_PER_PTE	(PAGE_SHIFT - BITS_PER_PTE_ENTRY)
#define PTRS_PER_PTE    (1UL << BITS_PER_PTE)
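
/* Worked example, assuming the usual entry sizes from asm/page.h
 * (BITS_PER_PTE_ENTRY is 2 on 32-bit and 3 on 64-bit): with 4K pages
 * (PAGE_SHIFT == 12), a 64-bit kernel gets BITS_PER_PTE == 9, i.e.
 * PTRS_PER_PTE == 512 PTEs per page, while a 32-bit kernel gets
 * BITS_PER_PTE == 10, i.e. 1024 PTEs per page. */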

/* Definitions for 2nd level */
#define pgtable_cache_init()	do { } while (0)

#define PMD_SHIFT	(PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#if CONFIG_PGTABLE_LEVELS == 3
#define BITS_PER_PMD	(PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
#else
#define __PAGETABLE_PMD_FOLDED
#define BITS_PER_PMD	0
#endif
#define PTRS_PER_PMD    (1UL << BITS_PER_PMD)

/* Definitions for 1st level */
#define PGDIR_SHIFT	(PMD_SHIFT + BITS_PER_PMD)
#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
#define BITS_PER_PGD	(BITS_PER_LONG - PGDIR_SHIFT)
#else
#define BITS_PER_PGD	(PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD    (1UL << BITS_PER_PGD)
#define USER_PTRS_PER_PGD       PTRS_PER_PGD

#ifdef CONFIG_64BIT
#define MAX_ADDRBITS	(PGDIR_SHIFT + BITS_PER_PGD)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT	(MAX_ADDRBITS - 32)
#else
#define MAX_ADDRBITS	(BITS_PER_LONG)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT	0
#endif
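
/* Worked example, continuing the 64-bit/4K case above (and assuming
 * 4-byte pmd/pgd entries, i.e. BITS_PER_PMD_ENTRY == BITS_PER_PGD_ENTRY
 * == 2 from asm/page.h): PMD_SHIFT == 12 + 9 == 21, BITS_PER_PMD ==
 * 12 + 1 - 2 == 11, so PGDIR_SHIFT == 21 + 11 == 32 and each pgd entry
 * maps 4GB.  BITS_PER_PGD == 11 as well, giving MAX_ADDRBITS ==
 * 32 + 11 == 43 and SPACEID_SHIFT == 11. */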

/* This calculates the number of pages we need for the initial
 * page tables */
#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
#else
# define PT_INITIAL	(1)  /* all initial PTEs fit into one page */
#endif
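
/* E.g. with the 64-bit/4K values above (PMD_SHIFT == 21), PT_INITIAL
 * == 1 << (24 - 21) == 8 pages of initial page tables. */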

/*
 * pgd entries used up by user/kernel:
 */

#define FIRST_USER_ADDRESS	0UL

/* NB: The TLB miss handlers make certain assumptions about the order */
/*     of the following bits, so be careful (one example: bits 25-31  */
/*     are moved together in one instruction).                        */

#define _PAGE_READ_BIT     31   /* (0x001) read access allowed */
#define _PAGE_WRITE_BIT    30   /* (0x002) write access allowed */
#define _PAGE_EXEC_BIT     29   /* (0x004) execute access allowed */
#define _PAGE_GATEWAY_BIT  28   /* (0x008) privilege promotion allowed */
#define _PAGE_DMB_BIT      27   /* (0x010) Data Memory Break enable (B bit) */
#define _PAGE_DIRTY_BIT    26   /* (0x020) Page Dirty (D bit) */
#define _PAGE_REFTRAP_BIT  25   /* (0x040) Page Ref. Trap enable (T bit) */
#define _PAGE_NO_CACHE_BIT 24   /* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23   /* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT  22   /* (0x200) Software: translation valid */
/* bit 21 was formerly the FLUSH bit but is now unused */
#define _PAGE_USER_BIT     20   /* (0x800) Software: User accessible page */

/* N.B. The bits are defined in terms of a 32-bit word above, so the */
/*      following macro is OK for both 32-bit and 64-bit.            */

#define xlate_pabit(x) (31 - (x))

/* This defines the shift to the usable bits in the PTE; it is set so
 * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set
 * to zero */
#define PTE_SHIFT	   	xlate_pabit(_PAGE_USER_BIT)
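
/* Worked example: PA-RISC numbers bits from the MSB down, so
 * _PAGE_READ_BIT == 31 maps to bit 0 of the word: _PAGE_READ ==
 * 1 << xlate_pabit(31) == 0x001, and _PAGE_USER ==
 * 1 << xlate_pabit(20) == 0x800, matching the hex values in the
 * comments above.  PTE_SHIFT therefore evaluates to 11. */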

/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
#define PFN_PTE_SHIFT		12

#define _PAGE_READ     (1 << xlate_pabit(_PAGE_READ_BIT))
#define _PAGE_WRITE    (1 << xlate_pabit(_PAGE_WRITE_BIT))
#define _PAGE_RW       (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_EXEC     (1 << xlate_pabit(_PAGE_EXEC_BIT))
#define _PAGE_GATEWAY  (1 << xlate_pabit(_PAGE_GATEWAY_BIT))
#define _PAGE_DMB      (1 << xlate_pabit(_PAGE_DMB_BIT))
#define _PAGE_DIRTY    (1 << xlate_pabit(_PAGE_DIRTY_BIT))
#define _PAGE_REFTRAP  (1 << xlate_pabit(_PAGE_REFTRAP_BIT))
#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT  (1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_USER     (1 << xlate_pabit(_PAGE_USER_BIT))

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO	(_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL_EXEC	(_PAGE_KERNEL_RO | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX	(_PAGE_KERNEL_EXEC | _PAGE_WRITE)
#define _PAGE_KERNEL		(_PAGE_KERNEL_RO | _PAGE_WRITE)

/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
 * are page-aligned, we don't care about the low, in-page offset bits,
 * except for a few meta-information bits, so we shift the address to
 * be able to effectively address 40/42/44 bits of physical address
 * space depending on the 4k/16k/64k PAGE_SIZE */
#define _PxD_PRESENT_BIT   31
#define _PxD_ATTACHED_BIT  30
#define _PxD_VALID_BIT     29

#define PxD_FLAG_PRESENT  (1 << xlate_pabit(_PxD_PRESENT_BIT))
#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT))
#define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
#define PxD_FLAG_MASK     (0xf)
#define PxD_FLAG_SHIFT    (4)
#define PxD_VALUE_SHIFT   (8) /* (PAGE_SHIFT-PxD_FLAG_SHIFT) */
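
/* Example of the encoding: a page-aligned physical address has its low
 * PAGE_SHIFT bits clear, so after shifting it right by PxD_VALUE_SHIFT
 * (PAGE_SHIFT - PxD_FLAG_SHIFT == 8 for 4K pages) the low
 * PxD_FLAG_SHIFT bits are still zero and can hold the PxD_FLAG_* bits.
 * pmd_address()/pgd_address() below reverse this by masking off the
 * flags and shifting left by PxD_VALUE_SHIFT, so a 32-bit entry
 * reaches 32 + 8 == 40 bits of physical address space. */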

#ifndef __ASSEMBLY__

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
/* Others seem to make this executable, but I don't know whether that's
   correct or not.  The stack is mapped this way though, so this is
   necessary in the short term - dhd@linuxcare.com, 2000-08-08 */
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_WRITEONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE | _PAGE_ACCESSED)
#define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC | _PAGE_ACCESSED)
#define PAGE_COPY       PAGE_EXECREAD
#define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RWX	__pgprot(_PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY | _PAGE_READ)


/*
 * We could have an execute-only page using "gateway - promote to priv
 * level 3", but that is kind of silly.  So, the way things are defined
 * now, we must always have read permission for pages with execute
 * permission.  For the fun of it we'll go ahead and support write-only
 * pages.
 */

	 /* xwr */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  __P000 /* copy on write */
#define __P011  __P001 /* copy on write */
#define __P100  PAGE_EXECREAD
#define __P101  PAGE_EXECREAD
#define __P110  __P100 /* copy on write */
#define __P111  __P101 /* copy on write */

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_WRITEONLY
#define __S011  PAGE_SHARED
#define __S100  PAGE_EXECREAD
#define __S101  PAGE_EXECREAD
#define __S110  PAGE_RWX
#define __S111  PAGE_RWX
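
/* Example: a private PROT_READ|PROT_WRITE mapping uses __P011, which
 * resolves to PAGE_READONLY; the generic fault path only makes the
 * page writable after breaking copy-on-write.  The shared case
 * (__S011) maps straight to PAGE_SHARED. */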

extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */

/* initial page tables for 0-8MB for kernel */

extern pte_t pg0[];

/* zero page used for uninitialized stuff */

extern unsigned long *empty_zero_page;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#define pte_none(x)     (pte_val(x) == 0)
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, xp)	do { pte_val(*(xp)) = 0; } while (0)

#define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x)	((unsigned long)(pmd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x)	((unsigned long)(pgd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)

#if CONFIG_PGTABLE_LEVELS == 3
/* The first entry of the permanent pmd is not there if it contains
 * the gateway marker */
#define pmd_none(x)	(!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
#else
#define pmd_none(x)	(!pmd_val(x))
#endif
#define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
#define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd)
{
#if CONFIG_PGTABLE_LEVELS == 3
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
		/* This is the entry pointing to the permanent pmd
		 * attached to the pgd; cannot clear it */
		__pmd_val_set(*pmd, PxD_FLAG_ATTACHED);
	else
#endif
		__pmd_val_set(*pmd, 0);
}

#if CONFIG_PGTABLE_LEVELS == 3
#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd)))
#define pgd_page(pgd)	virt_to_page((void *)pgd_page_vaddr(pgd))

/* For 64-bit we have three-level tables */

#define pgd_none(x)     (!pgd_val(x))
#define pgd_bad(x)      (!(pgd_flag(x) & PxD_FLAG_VALID))
#define pgd_present(x)  (pgd_flag(x) & PxD_FLAG_PRESENT)
static inline void pgd_clear(pgd_t *pgd)
{
	if (pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
		/* This is the permanent pmd attached to the pgd; cannot
		 * free it */
		return;
	__pgd_val_set(*pgd, 0);
}
#else
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
static inline void pgd_clear(pgd_t *pgdp)	{ }
#endif

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_special(pte_t pte)	{ return 0; }

static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return pte; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define __mk_pte(addr, pgprot)						\
({									\
	pte_t __pte;							\
									\
	pte_val(__pte) = ((((addr) >> PAGE_SHIFT) << PFN_PTE_SHIFT) + pgprot_val(pgprot)); \
									\
	__pte;								\
})

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
	return pte;
}
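
/* For illustration: pfn_pte(0x100, PAGE_KERNEL) builds a PTE with PFN
 * field 0x100 (physical address 0x100000 with 4K pages) and the kernel
 * read/write protection bits; pte_pfn() below recovers the 0x100. */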

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

/* Permanent address of a page.  On parisc we don't have highmem. */

#define pte_pfn(x)		(pte_val(x) >> PFN_PTE_SHIFT)

#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

#define pmd_page_vaddr(pmd)	((unsigned long) __va(pmd_address(pmd)))

#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
#define pmd_page(pmd)	virt_to_page((void *)__pmd_page(pmd))

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) \
	((mm)->pgd + ((address) >> PGDIR_SHIFT))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */

#if CONFIG_PGTABLE_LEVELS == 3
#define pmd_offset(dir, address) \
	((pmd_t *) pgd_page_vaddr(*(dir)) + (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
#else
#define pmd_offset(dir, addr) ((pmd_t *) (dir))
#endif

/* Find an entry in the third-level page table.. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(pmd, address) \
	((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
#define pte_offset_map(pmd, address)	pte_offset_kernel(pmd, address)
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)
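
/* A minimal sketch of how the macros above chain together into a
 * software page-table walk; the helper is illustrative only (the name
 * is hypothetical, not part of this header's API) but works for both
 * the folded and the 3-level configuration. */
static inline pte_t *__example_lookup_pte(struct mm_struct *mm,
					  unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* 1st level */
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pmd = pmd_offset(pgd, addr);		/* 2nd level (may be folded) */
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* 3rd level */
}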

extern void paging_init(void);

/* Used for deferring calls to flush_dcache_page() */

#define PG_dcache_dirty         PG_arch_1

extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

/* Encode and de-code a swap entry */

#define __swp_type(x)			((x).val & 0x1f)
#define __swp_offset(x)			((((x).val >> 6) & 0x7) | \
					 (((x).val >> 8) & ~0x7))
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | \
					  (((offset) & 0x7) << 6) | \
					  (((offset) & ~0x7) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

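/* ptep_test_and_clear_young() first tests the accessed bit locklessly
 * as a fast path and then re-checks it under pa_dbit_lock before
 * clearing, so a concurrent update of the PTE is not lost. */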
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	pte_t pte;
	unsigned long flags;

	if (!pte_young(*ptep))
		return 0;

	spin_lock_irqsave(&pa_dbit_lock, flags);
	pte = *ptep;
	if (!pte_young(pte)) {
		spin_unlock_irqrestore(&pa_dbit_lock, flags);
		return 0;
	}
	set_pte(ptep, pte_mkold(pte));
	purge_tlb_entries(vma->vm_mm, addr);
	spin_unlock_irqrestore(&pa_dbit_lock, flags);
	return 1;
}

struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t old_pte;
	unsigned long flags;

	spin_lock_irqsave(&pa_dbit_lock, flags);
	old_pte = *ptep;
	pte_clear(mm, addr, ptep);
	purge_tlb_entries(mm, addr);
	spin_unlock_irqrestore(&pa_dbit_lock, flags);

	return old_pte;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long flags;

	spin_lock_irqsave(&pa_dbit_lock, flags);
	set_pte(ptep, pte_wrprotect(*ptep));
	purge_tlb_entries(mm, addr);
	spin_unlock_irqrestore(&pa_dbit_lock, flags);
}

#define pte_same(A, B)	(pte_val(A) == pte_val(B))

#endif /* !__ASSEMBLY__ */

/* TLB page size encoding - see table 3-1 in parisc20.pdf */
#define _PAGE_SIZE_ENCODING_4K		0
#define _PAGE_SIZE_ENCODING_16K		1
#define _PAGE_SIZE_ENCODING_64K		2
#define _PAGE_SIZE_ENCODING_256K	3
#define _PAGE_SIZE_ENCODING_1M		4
#define _PAGE_SIZE_ENCODING_4M		5
#define _PAGE_SIZE_ENCODING_16M		6
#define _PAGE_SIZE_ENCODING_64M		7

#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
#endif

#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)

/* We provide our own get_unmapped_area to ensure cache coherency */

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* _PARISC_PGTABLE_H */