#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H

#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif

#ifndef __ASSEMBLY__

/* Generic accessors to PTE bits */
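/*
 * Note: which bit encodes write permission differs between nohash
 * platforms: some use a read-write bit (_PAGE_RW), others (e.g. the 8xx)
 * a read-only bit (_PAGE_RO). On any given platform only one of the two
 * is non-zero, so testing both below handles either encoding.
 */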
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO;
}
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h. On powerpc, this will only
 * work for user pages and always return true for kernel pages.
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) &
		(_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
}
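/*
 * In other words: such a PTE is "present" as far as the core mm is
 * concerned, but carries no user access bit, so a userspace touch traps
 * and can be routed into the NUMA hinting fault path.
 */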

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}
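/*
 * The PFN sits in the bits at and above PTE_RPN_SHIFT while the
 * protection bits live below it, so (assuming the pgprot bits stay below
 * PTE_RPN_SHIFT) the two conversions round-trip:
 * pte_pfn(pfn_pte(pfn, prot)) == pfn.
 */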

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_basic_t ptev;

	ptev = pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE);
	ptev |= _PAGE_RO;
	return __pte(ptev);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_basic_t ptev;

	ptev = pte_val(pte) & ~_PAGE_RO;
	ptev |= _PAGE_RW;
	return __pte(ptev);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
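/*
 * pte_modify() is what paths such as mprotect() end up using:
 * _PAGE_CHG_MASK keeps the bits that must survive a protection change
 * (the PFN plus, depending on the platform, bits like dirty/accessed)
 * and everything else comes from the new protection.
 */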

/* Insert a PTE, top-level function is out of line. It uses an inline
 * low-level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);

/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
	 * helper pte_update() which does an atomic update. We need to do that
	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
	 * the hash bits instead (i.e. same as the non-SMP case).
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE. In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update preserving
	 * the hash bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
#if _PAGE_HASHPTE != 0
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
#endif
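	/* On big-endian 32-bit, the flags (including _PAGE_PRESENT) live in
	 * the low word at ptep+4: store the high word first, then a barrier,
	 * then the low word, so the PTE never looks valid with a stale
	 * other half.
	 */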
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is a 32-bit hash table in UP mode: we need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx()),
	 * so we need to keep track that this PTE needs invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;

#ifdef CONFIG_PPC_BOOK3E_64
	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote.  Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	if (is_kernel_addr(addr))
		mb();
#endif
#endif
}
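/*
 * For reference (a sketch, not part of this file): the out-of-line
 * set_pte_at() in arch/powerpc/mm/pgtable.c reduces, after its sanity
 * checks on the old PTE, to roughly
 *
 *	__set_pte_at(mm, addr, ptep, pte, 0);
 */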

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to alter the cache-control bits of a page protection value,
 * e.g. to mark it as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				            _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				            _PAGE_NO_CACHE))

#define pgprot_cached(prot)       (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				            _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				            _PAGE_COHERENT | _PAGE_WRITETHRU))

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc
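/*
 * Typical driver usage (a sketch, not taken from this file): an mmap()
 * handler exposing MMIO would do something like
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 */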

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
	return (hpd.pd > 0);
}
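/*
 * On nohash, huge pages are reached through hugepd (huge page directory)
 * entries rather than through leaf pmd/pud/pgd entries, which is why the
 * three helpers below unconditionally return 0.
 */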

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int pgd_huge(pgd_t pgd)
{
	return 0;
}
#define pgd_huge		pgd_huge

#define is_hugepd(hpd)		(hugepd_ok(hpd))
#endif /* CONFIG_HUGETLB_PAGE */

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_NOHASH_PGTABLE_H */