1fe6cb7b0SVineet Gupta /* SPDX-License-Identifier: GPL-2.0-only */
2fe6cb7b0SVineet Gupta /*
3fe6cb7b0SVineet Gupta  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
4fe6cb7b0SVineet Gupta  */
5fe6cb7b0SVineet Gupta 
6fe6cb7b0SVineet Gupta /*
7fe6cb7b0SVineet Gupta  * page table flags for software walked/managed MMUv3 (ARC700) and MMUv4 (HS)
 * These correspond to the matching bits in a hardware TLB entry
9fe6cb7b0SVineet Gupta  */
10fe6cb7b0SVineet Gupta 
11fe6cb7b0SVineet Gupta #ifndef _ASM_ARC_PGTABLE_BITS_ARCV2_H
12fe6cb7b0SVineet Gupta #define _ASM_ARC_PGTABLE_BITS_ARCV2_H
13fe6cb7b0SVineet Gupta 
/*
 * With CONFIG_ARC_CACHE_PAGES=n the bit folds away to 0, so the cacheable
 * bit is never set in any PTE built from these flags.
 */
#ifdef CONFIG_ARC_CACHE_PAGES
#define _PAGE_CACHEABLE		(1 << 0)  /* Cached (H) */
#else
#define _PAGE_CACHEABLE		0
#endif
19fe6cb7b0SVineet Gupta 
/* Legend: (H) = bit is consumed by the hardware TLB entry, (s) = software only */
#define _PAGE_EXECUTE		(1 << 1)  /* User Execute  (H) */
#define _PAGE_WRITE		(1 << 2)  /* User Write    (H) */
#define _PAGE_READ		(1 << 3)  /* User Read     (H) */
#define _PAGE_ACCESSED		(1 << 4)  /* Accessed      (s) */
#define _PAGE_DIRTY		(1 << 5)  /* Modified      (s) */
#define _PAGE_SPECIAL		(1 << 6)
#define _PAGE_GLOBAL		(1 << 8)  /* ASID agnostic (H) */
#define _PAGE_PRESENT		(1 << 9)  /* PTE/TLB Valid (H) */
28fe6cb7b0SVineet Gupta 
/*
 * We borrow bit 5 (_PAGE_DIRTY) to store the exclusive marker in swap PTEs;
 * swap PTEs are !pte_present(), so the dirty bit is free for reuse there.
 */
#define _PAGE_SWP_EXCLUSIVE	_PAGE_DIRTY
/* MMUv4 has a per-entry page-size (normal/super) bit; MMUv3 does not */
#ifdef CONFIG_ARC_MMU_V4
#define _PAGE_HW_SZ		(1 << 10)  /* Normal/super (H) */
#else
#define _PAGE_HW_SZ		0
#endif
37fe6cb7b0SVineet Gupta 
/* Defaults for every user page */
#define ___DEF		(_PAGE_PRESENT | _PAGE_CACHEABLE)

/*
 * Set of bits not changed in pte_modify(): the physical frame bits plus
 * the software-managed accessed/dirty/special state.
 */
#define _PAGE_CHG_MASK	(PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
							   _PAGE_SPECIAL)
44fe6cb7b0SVineet Gupta 
/* More abbreviated helpers: user protections built on top of ___DEF */
#define PAGE_U_NONE     __pgprot(___DEF)
#define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R      __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R      __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R    __pgprot(___DEF \
				| _PAGE_READ | _PAGE_WRITE | _PAGE_EXECUTE)
/* Kernel mappings are additionally _PAGE_GLOBAL (valid across all ASIDs) */
#define PAGE_KERNEL     __pgprot(___DEF | _PAGE_GLOBAL \
				| _PAGE_READ | _PAGE_WRITE | _PAGE_EXECUTE)

#define PAGE_SHARED	PAGE_U_W_R

/* Strip the cacheable bit to get the uncached variant of a protection */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
58fe6cb7b0SVineet Gupta 
59fe6cb7b0SVineet Gupta /*
60fe6cb7b0SVineet Gupta  * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
61fe6cb7b0SVineet Gupta  *
62fe6cb7b0SVineet Gupta  * Certain cases have 1:1 mapping
63fe6cb7b0SVineet Gupta  *  e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
64fe6cb7b0SVineet Gupta  *       which directly corresponds to  PAGE_U_X_R
65fe6cb7b0SVineet Gupta  *
66fe6cb7b0SVineet Gupta  * Other rules which cause the divergence from 1:1 mapping
67fe6cb7b0SVineet Gupta  *
68fe6cb7b0SVineet Gupta  *  1. Although ARC700 can do exclusive execute/write protection (meaning R
 *     can be tracked independent of X/W unlike some other CPUs), still to
70fe6cb7b0SVineet Gupta  *     keep things consistent with other archs:
71fe6cb7b0SVineet Gupta  *      -Write implies Read:   W => R
72fe6cb7b0SVineet Gupta  *      -Execute implies Read: X => R
73fe6cb7b0SVineet Gupta  *
74fe6cb7b0SVineet Gupta  *  2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
75fe6cb7b0SVineet Gupta  *     This is to enable COW mechanism
76fe6cb7b0SVineet Gupta  */
77fe6cb7b0SVineet Gupta 	/* xwr */
78fe6cb7b0SVineet Gupta #ifndef __ASSEMBLY__
79fe6cb7b0SVineet Gupta 
/* Query helpers: non-zero iff the corresponding flag is set in the PTE */
#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & _PAGE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)	(pte_val(pte) & _PAGE_SPECIAL)

/*
 * Generate a pte_<fn>() helper that applies @op to a PTE value and returns
 * the modified PTE (pass-by-value, so the caller's PTE is untouched).
 */
#define PTE_BIT_FUNC(fn, op) \
	static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(mknotpresent,     &= ~(_PAGE_PRESENT));
PTE_BIT_FUNC(wrprotect,	&= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite_novma,	|= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,	&= ~(_PAGE_DIRTY));
PTE_BIT_FUNC(mkdirty,	|= (_PAGE_DIRTY));
PTE_BIT_FUNC(mkold,	&= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,	|= (_PAGE_ACCESSED));
PTE_BIT_FUNC(mkspecial,	|= (_PAGE_SPECIAL));
PTE_BIT_FUNC(mkhuge,	|= (_PAGE_HW_SZ));
97fe6cb7b0SVineet Gupta 
/*
 * Build a new PTE: keep the _PAGE_CHG_MASK bits (pfn + sw accessed/dirty/
 * special state) from @pte, take everything else from @newprot.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	unsigned long kept = pte_val(pte) & _PAGE_CHG_MASK;

	return __pte(kept | pgprot_val(newprot));
}
102fe6cb7b0SVineet Gupta 
/*
 * Arch hook run by core MM after @nr PTEs starting at @address have been
 * installed; @vmf may be NULL when there is no fault context.
 */
struct vm_fault;
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep, unsigned int nr);

/* Legacy single-PTE wrapper: no fault context, one page */
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
109fe6cb7b0SVineet Gupta 
/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <-------------- offset -------------> <--- zero --> E < type ->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   The zero'ed bits include _PAGE_PRESENT.
 */
/* type lives in bits [4:0] (0x1f => max 32 types), offset in bits [31:13] */
#define __swp_entry(type, off)		((swp_entry_t) \
					{ ((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing swap "identifier" into constituents */
#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)

/* Swap entry and swap PTE share the same bit layout, so these are copies */
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
132fe6cb7b0SVineet Gupta 
/* Test the exclusive (E) marker in a swap PTE; non-zero iff set */
static inline int pte_swp_exclusive(pte_t pte)
{
	unsigned long v = pte_val(pte);

	return v & _PAGE_SWP_EXCLUSIVE;
}
1374a446b3dSDavid Hildenbrand 
/* Set/clear the exclusive marker in a swap PTE */
PTE_BIT_FUNC(swp_mkexclusive, |= (_PAGE_SWP_EXCLUSIVE));
PTE_BIT_FUNC(swp_clear_exclusive, &= ~(_PAGE_SWP_EXCLUSIVE));
1404a446b3dSDavid Hildenbrand 
141fe6cb7b0SVineet Gupta #ifdef CONFIG_TRANSPARENT_HUGEPAGE
142fe6cb7b0SVineet Gupta #include <asm/hugepage.h>
143fe6cb7b0SVineet Gupta #endif
144fe6cb7b0SVineet Gupta 
145fe6cb7b0SVineet Gupta #endif /* __ASSEMBLY__ */
146fe6cb7b0SVineet Gupta 
147fe6cb7b0SVineet Gupta #endif
148