1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
3 #define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
4 
5 #define __ARCH_USE_5LEVEL_HACK
6 #include <asm-generic/pgtable-nopmd.h>
7 
8 #include <asm/book3s/32/hash.h>
9 
10 /* And here we include common definitions */
11 
12 #define _PAGE_KERNEL_RO		0
13 #define _PAGE_KERNEL_ROX	(_PAGE_EXEC)
14 #define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW)
15 #define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
16 
17 #define _PAGE_HPTEFLAGS _PAGE_HASHPTE
18 
19 #ifndef __ASSEMBLY__
20 
21 static inline bool pte_user(pte_t pte)
22 {
23 	return pte_val(pte) & _PAGE_USER;
24 }
25 #endif /* __ASSEMBLY__ */
26 
27 /*
28  * Location of the PFN in the PTE. Most 32-bit platforms use the same
29  * as _PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that don't use the natural alignment pre-define the value
 * themselves, so we don't override it here.
31  */
32 #define PTE_RPN_SHIFT	(PAGE_SHIFT)
33 
34 /*
35  * The mask covered by the RPN must be a ULL on 32-bit platforms with
36  * 64-bit PTEs.
37  */
38 #ifdef CONFIG_PTE_64BIT
39 #define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
40 #else
41 #define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
42 #endif
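
/*
 * Worked example (illustrative only): with 4k pages (PAGE_SHIFT = 12) and
 * 32-bit PTEs, PTE_RPN_MASK evaluates to 0xfffff000, so a PTE value of
 * 0x01234567 encodes PFN 0x01234 in its top 20 bits and keeps the
 * protection/status flags in its low 12 bits (0x567).
 */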
43 
44 /*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
46  * pgprot changes.
47  */
48 #define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
49 			 _PAGE_ACCESSED | _PAGE_SPECIAL)
50 
51 /*
52  * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non-cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or when
 * the processor might need it for DMA coherency.
56  */
57 #define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
58 #define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)
59 
60 /*
61  * Permission masks used to generate the __P and __S table.
62  *
 * Note: __pgprot() is defined in arch/powerpc/include/asm/page.h
64  *
65  * Write permissions imply read permissions for now.
66  */
67 #define PAGE_NONE	__pgprot(_PAGE_BASE)
68 #define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
69 #define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
70 #define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
71 #define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
72 #define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
73 #define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
74 
75 /* Permission masks used for kernel mappings */
76 #define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
77 #define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
78 #define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
79 				 _PAGE_NO_CACHE | _PAGE_GUARDED)
80 #define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
81 #define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
82 #define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
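
/*
 * For reference (derived from the definitions above, illustrative only):
 * PAGE_KERNEL expands to
 *	_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT | _PAGE_DIRTY | _PAGE_RW
 * i.e. a cacheable, coherent, read-write mapping without _PAGE_USER, so it
 * is accessible from supervisor mode only.
 */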
83 
84 /*
85  * Protection used for kernel text. We want the debuggers to be able to
86  * set breakpoints anywhere, so don't write protect the kernel text
87  * on platforms where such control is possible.
88  */
89 #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
90 	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
91 #define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
92 #else
93 #define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
94 #endif
95 
/* Make module code happy; we don't set RO yet */
97 #define PAGE_KERNEL_EXEC	PAGE_KERNEL_X
98 
99 /* Advertise special mapping type for AGP */
100 #define PAGE_AGP		(PAGE_KERNEL_NC)
101 #define HAVE_PAGE_AGP
102 
103 #define PTE_INDEX_SIZE	PTE_SHIFT
104 #define PMD_INDEX_SIZE	0
105 #define PUD_INDEX_SIZE	0
106 #define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)
107 
108 #define PMD_CACHE_INDEX	PMD_INDEX_SIZE
109 #define PUD_CACHE_INDEX	PUD_INDEX_SIZE
110 
111 #ifndef __ASSEMBLY__
112 #define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
113 #define PMD_TABLE_SIZE	0
114 #define PUD_TABLE_SIZE	0
115 #define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
116 #endif	/* __ASSEMBLY__ */
117 
118 #define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
119 #define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
120 
121 /*
122  * The normal case is that PTEs are 32-bits and we have a 1-page
123  * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
124  *
125  * For any >32-bit physical address platform, we can use the following
126  * two level page table layout where the pgdir is 8KB and the MS 13 bits
127  * are an index to the second level table.  The combined pgdir/pmd first
128  * level has 2048 entries and the second level has 512 64-bit PTE entries.
129  * -Matt
130  */
131 /* PGDIR_SHIFT determines what a top-level page table entry can map */
132 #define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
133 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
134 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
135 
136 #define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
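
/*
 * Worked example (illustrative, assuming the usual 4k pages with 32-bit
 * PTEs, i.e. PAGE_SHIFT = 12 and PTE_SHIFT = 10): PGDIR_SHIFT = 22, so each
 * PGD entry maps PGDIR_SIZE = 4MB, and PTRS_PER_PTE = PTRS_PER_PGD = 1024,
 * which matches the 1-page pgdir / 1-page PTE-page layout described above.
 */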
137 
138 #ifndef __ASSEMBLY__
139 
140 int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
141 
142 #endif /* !__ASSEMBLY__ */
143 
144 /*
 * This is the bottom of the PKMAP area with HIGHMEM, or an arbitrary
 * value (for now) on others, from which we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP.
148  */
149 #include <asm/fixmap.h>
150 
151 /*
152  * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init(), at which point this becomes the top of the vmalloc
 * and ioremap space.
155  */
156 #ifdef CONFIG_HIGHMEM
157 #define IOREMAP_TOP	PKMAP_BASE
158 #else
159 #define IOREMAP_TOP	FIXADDR_START
160 #endif
161 
162 /* PPC32 shares vmalloc area with ioremap */
163 #define IOREMAP_START	VMALLOC_START
164 #define IOREMAP_END	VMALLOC_END
165 
166 /*
167  * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
169  * physical memory until the kernel virtual memory starts.  That means that
170  * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
172  * area for the same reason. ;)
173  *
174  * We no longer map larger than phys RAM with the BATs so we don't have
175  * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
176  * about clashes between our early calls to ioremap() that start growing down
177  * from ioremap_base being run into the VM area allocations (growing upwards
178  * from VMALLOC_START).  For this reason we have ioremap_bot to check when
179  * we actually run into our mappings setup in the early boot with the VM
180  * system.  This really does become a problem for machines with good amounts
181  * of RAM.  -- Cort
182  */
183 #define VMALLOC_OFFSET (0x1000000) /* 16M */
184 
185 /*
186  * With CONFIG_STRICT_KERNEL_RWX, kernel segments are set NX. But when modules
187  * are used, NX cannot be set on VMALLOC space. So vmalloc VM space and linear
188  * memory shall not share segments.
189  */
190 #if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_MODULES)
191 #define VMALLOC_START ((_ALIGN((long)high_memory, 256L << 20) + VMALLOC_OFFSET) & \
192 		       ~(VMALLOC_OFFSET - 1))
193 #else
194 #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
195 #endif
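
/*
 * Worked example (illustrative, non-STRICT_KERNEL_RWX case): if high_memory
 * is 0xd0000000, VMALLOC_START = (0xd0000000 + 0x1000000) & ~0xffffff
 * = 0xd1000000, i.e. vmalloc space starts at least VMALLOC_OFFSET (16MB)
 * above the top of lowmem, preserving the guard hole described above.
 */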
196 
197 #ifdef CONFIG_KASAN_VMALLOC
198 #define VMALLOC_END	_ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
199 #else
200 #define VMALLOC_END	ioremap_bot
201 #endif
202 
203 #ifndef __ASSEMBLY__
204 #include <linux/sched.h>
205 #include <linux/threads.h>
206 
207 /* Bits to mask out from a PGD to get to the PUD page */
208 #define PGD_MASKED_BITS		0
209 
210 #define pte_ERROR(e) \
211 	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
212 		(unsigned long long)pte_val(e))
213 #define pgd_ERROR(e) \
214 	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
215 /*
216  * Bits in a linux-style PTE.  These match the bits in the
217  * (hardware-defined) PowerPC PTE as closely as possible.
218  */
219 
220 #define pte_clear(mm, addr, ptep) \
221 	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
222 
223 #define pmd_none(pmd)		(!pmd_val(pmd))
224 #define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
225 #define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
226 static inline void pmd_clear(pmd_t *pmdp)
227 {
228 	*pmdp = __pmd(0);
229 }
230 
231 
232 /*
233  * When flushing the tlb entry for a page, we also need to flush the hash
234  * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
235  */
236 extern int flush_hash_pages(unsigned context, unsigned long va,
237 			    unsigned long pmdval, int count);
238 
239 /* Add an HPTE to the hash table */
240 extern void add_hash_page(unsigned context, unsigned long va,
241 			  unsigned long pmdval);
242 
243 /* Flush an entry from the TLB/hash table */
244 extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
245 			     unsigned long address);
246 
247 /*
248  * PTE updates. This function is called whenever an existing
249  * valid PTE is updated. This does -not- include set_pte_at()
250  * which nowadays only sets a new PTE.
251  *
252  * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
254  * when using atomic updates, only the low part of the PTE is
255  * accessed atomically.
256  *
257  * In addition, on 44x, we also maintain a global flag indicating
258  * that an executable user mapping was modified, which is needed
259  * to properly flush the virtually tagged instruction cache of
260  * those implementations.
261  */
262 #ifndef CONFIG_PTE_64BIT
263 static inline unsigned long pte_update(pte_t *p,
264 				       unsigned long clr,
265 				       unsigned long set)
266 {
267 	unsigned long old, tmp;
268 
269 	__asm__ __volatile__("\
270 1:	lwarx	%0,0,%3\n\
271 	andc	%1,%0,%4\n\
272 	or	%1,%1,%5\n"
273 "	stwcx.	%1,0,%3\n\
274 	bne-	1b"
275 	: "=&r" (old), "=&r" (tmp), "=m" (*p)
276 	: "r" (p), "r" (clr), "r" (set), "m" (*p)
277 	: "cc" );
278 
279 	return old;
280 }
281 #else /* CONFIG_PTE_64BIT */
282 static inline unsigned long long pte_update(pte_t *p,
283 					    unsigned long clr,
284 					    unsigned long set)
285 {
286 	unsigned long long old;
287 	unsigned long tmp;
288 
289 	__asm__ __volatile__("\
290 1:	lwarx	%L0,0,%4\n\
291 	lwzx	%0,0,%3\n\
292 	andc	%1,%L0,%5\n\
293 	or	%1,%1,%6\n"
294 "	stwcx.	%1,0,%4\n\
295 	bne-	1b"
296 	: "=&r" (old), "=&r" (tmp), "=m" (*p)
297 	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
298 	: "cc" );
299 
300 	return old;
301 }
302 #endif /* CONFIG_PTE_64BIT */
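
/*
 * Usage sketch (illustrative): pte_update(ptep, _PAGE_RW, 0) atomically
 * write-protects a PTE (see ptep_set_wrprotect() below), and
 * pte_update(ptep, 0, _PAGE_DIRTY) atomically sets the dirty bit; in both
 * cases a concurrent clearing of _PAGE_HASHPTE by a hash invalidation is
 * not lost.
 */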
303 
304 /*
305  * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
307  */
308 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
309 static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
310 {
311 	unsigned long old;
312 	old = pte_update(ptep, _PAGE_ACCESSED, 0);
313 	if (old & _PAGE_HASHPTE) {
314 		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
315 		flush_hash_pages(context, addr, ptephys, 1);
316 	}
317 	return (old & _PAGE_ACCESSED) != 0;
318 }
319 #define ptep_test_and_clear_young(__vma, __addr, __ptep) \
320 	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
321 
322 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
323 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
324 				       pte_t *ptep)
325 {
326 	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
327 }
328 
329 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
330 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
331 				      pte_t *ptep)
332 {
333 	pte_update(ptep, _PAGE_RW, 0);
334 }
335 
336 static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
337 					   pte_t *ptep, pte_t entry,
338 					   unsigned long address,
339 					   int psize)
340 {
341 	unsigned long set = pte_val(entry) &
342 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
343 
344 	pte_update(ptep, 0, set);
345 
346 	flush_tlb_page(vma, address);
347 }
348 
349 #define __HAVE_ARCH_PTE_SAME
350 #define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
351 
352 #define pmd_page_vaddr(pmd)	\
353 	((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
354 #define pmd_page(pmd)		\
355 	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
356 
357 /* to find an entry in a kernel page-table-directory */
358 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
359 
360 /* to find an entry in a page-table-directory */
361 #define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
362 #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
363 
364 /* Find an entry in the third-level page table.. */
365 #define pte_index(address)		\
366 	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
367 #define pte_offset_kernel(dir, addr)	\
368 	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
369 #define pte_offset_map(dir, addr)	pte_offset_kernel((dir), (addr))
370 static inline void pte_unmap(pte_t *pte) { }
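
/*
 * Illustrative sketch (not a kernel API): locating the PTE that maps a
 * kernel virtual address with the helpers above. PMD and PUD are folded,
 * so the walk is effectively two levels:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset(pud_offset(pgd, addr), addr);
 *	pte_t *pte = pmd_present(*pmd) ? pte_offset_kernel(pmd, addr) : NULL;
 */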
371 
372 /*
373  * Encode and decode a swap entry.
374  * Note that the bits we use in a PTE for representing a swap entry
375  * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
376  *   -- paulus
377  */
378 #define __swp_type(entry)		((entry).val & 0x1f)
379 #define __swp_offset(entry)		((entry).val >> 5)
380 #define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
381 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
382 #define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
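
/*
 * Illustrative layout (following the encoding above): the PTE is shifted
 * right by 3 so the swap entry never overlaps the low PTE bits (which
 * include _PAGE_PRESENT and _PAGE_HASHPTE). Within the entry, bits 0-4
 * hold the swap type and the upper bits hold the offset; e.g.
 * __swp_entry(2, 0x100).val == 0x2002, stored as a raw PTE value of 0x10010.
 */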
383 
384 /* Generic accessors to PTE bits */
385 static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
386 static inline int pte_read(pte_t pte)		{ return 1; }
387 static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
388 static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
389 static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
390 static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
391 static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }
392 
393 static inline int pte_present(pte_t pte)
394 {
395 	return pte_val(pte) & _PAGE_PRESENT;
396 }
397 
398 static inline bool pte_hw_valid(pte_t pte)
399 {
400 	return pte_val(pte) & _PAGE_PRESENT;
401 }
402 
403 static inline bool pte_hashpte(pte_t pte)
404 {
405 	return !!(pte_val(pte) & _PAGE_HASHPTE);
406 }
407 
408 static inline bool pte_ci(pte_t pte)
409 {
410 	return !!(pte_val(pte) & _PAGE_NO_CACHE);
411 }
412 
413 /*
 * We only find the page table entry in the last level,
 * hence there is no need for other accessors.
416  */
417 #define pte_access_permitted pte_access_permitted
418 static inline bool pte_access_permitted(pte_t pte, bool write)
419 {
420 	/*
	 * A read-only access is controlled by the _PAGE_USER bit;
	 * there is no separate _PAGE_READ bit here, so read permission
	 * is implied for WRITE and EXECUTE mappings as well.
423 	 */
424 	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
425 		return false;
426 
427 	if (write && !pte_write(pte))
428 		return false;
429 
430 	return true;
431 }
432 
433 /* Conversion functions: convert a page and protection to a page entry,
434  * and a page entry and page directory to the page they refer to.
435  *
436  * Even if PTEs can be unsigned long long, a PFN is always an unsigned
437  * long for now.
438  */
439 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
440 {
441 	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
442 		     pgprot_val(pgprot));
443 }
444 
445 static inline unsigned long pte_pfn(pte_t pte)
446 {
447 	return pte_val(pte) >> PTE_RPN_SHIFT;
448 }
449 
450 /* Generic modifiers for PTE bits */
451 static inline pte_t pte_wrprotect(pte_t pte)
452 {
453 	return __pte(pte_val(pte) & ~_PAGE_RW);
454 }
455 
456 static inline pte_t pte_exprotect(pte_t pte)
457 {
458 	return __pte(pte_val(pte) & ~_PAGE_EXEC);
459 }
460 
461 static inline pte_t pte_mkclean(pte_t pte)
462 {
463 	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
464 }
465 
466 static inline pte_t pte_mkold(pte_t pte)
467 {
468 	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
469 }
470 
471 static inline pte_t pte_mkexec(pte_t pte)
472 {
473 	return __pte(pte_val(pte) | _PAGE_EXEC);
474 }
475 
476 static inline pte_t pte_mkpte(pte_t pte)
477 {
478 	return pte;
479 }
480 
481 static inline pte_t pte_mkwrite(pte_t pte)
482 {
483 	return __pte(pte_val(pte) | _PAGE_RW);
484 }
485 
486 static inline pte_t pte_mkdirty(pte_t pte)
487 {
488 	return __pte(pte_val(pte) | _PAGE_DIRTY);
489 }
490 
491 static inline pte_t pte_mkyoung(pte_t pte)
492 {
493 	return __pte(pte_val(pte) | _PAGE_ACCESSED);
494 }
495 
496 static inline pte_t pte_mkspecial(pte_t pte)
497 {
498 	return __pte(pte_val(pte) | _PAGE_SPECIAL);
499 }
500 
501 static inline pte_t pte_mkhuge(pte_t pte)
502 {
503 	return pte;
504 }
505 
506 static inline pte_t pte_mkprivileged(pte_t pte)
507 {
508 	return __pte(pte_val(pte) & ~_PAGE_USER);
509 }
510 
511 static inline pte_t pte_mkuser(pte_t pte)
512 {
513 	return __pte(pte_val(pte) | _PAGE_USER);
514 }
515 
516 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
517 {
518 	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
519 }
520 
521 
522 
/* This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
527  */
528 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
529 				pte_t *ptep, pte_t pte, int percpu)
530 {
531 #if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
532 	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
533 	 * helper pte_update() which does an atomic update. We need to do that
534 	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
535 	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
536 	 * the hash bits instead (ie, same as the non-SMP case)
537 	 */
538 	if (percpu)
539 		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
540 			      | (pte_val(pte) & ~_PAGE_HASHPTE));
541 	else
542 		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
543 
544 #elif defined(CONFIG_PTE_64BIT)
545 	/* Second case is 32-bit with 64-bit PTE.  In this case, we
546 	 * can just store as long as we do the two halves in the right order
547 	 * with a barrier in between. This is possible because we take care,
548 	 * in the hash code, to pre-invalidate if the PTE was already hashed,
549 	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update
	 * preserving the hash bits.
552 	 */
553 	if (percpu) {
554 		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
555 			      | (pte_val(pte) & ~_PAGE_HASHPTE));
556 		return;
557 	}
558 	if (pte_val(*ptep) & _PAGE_HASHPTE)
559 		flush_hash_entry(mm, ptep, addr);
560 	__asm__ __volatile__("\
561 		stw%U0%X0 %2,%0\n\
562 		eieio\n\
		stw%U1%X1 %L2,%1"
564 	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
565 	: "r" (pte) : "memory");
566 
567 #else
568 	/* Third case is 32-bit hash table in UP mode, we need to preserve
569 	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
570 	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
	 * and so we need to keep track that this PTE needs invalidating
572 	 */
573 	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
574 		      | (pte_val(pte) & ~_PAGE_HASHPTE));
575 #endif
576 }
577 
578 /*
579  * Macro to mark a page protection value as "uncacheable".
580  */
581 
582 #define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
583 			 _PAGE_WRITETHRU)
584 
585 #define pgprot_noncached pgprot_noncached
586 static inline pgprot_t pgprot_noncached(pgprot_t prot)
587 {
588 	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
589 			_PAGE_NO_CACHE | _PAGE_GUARDED);
590 }
591 
592 #define pgprot_noncached_wc pgprot_noncached_wc
593 static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
594 {
595 	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
596 			_PAGE_NO_CACHE);
597 }
598 
599 #define pgprot_cached pgprot_cached
600 static inline pgprot_t pgprot_cached(pgprot_t prot)
601 {
602 	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
603 			_PAGE_COHERENT);
604 }
605 
606 #define pgprot_cached_wthru pgprot_cached_wthru
607 static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
608 {
609 	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
610 			_PAGE_COHERENT | _PAGE_WRITETHRU);
611 }
612 
613 #define pgprot_cached_noncoherent pgprot_cached_noncoherent
614 static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
615 {
616 	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
617 }
618 
619 #define pgprot_writecombine pgprot_writecombine
620 static inline pgprot_t pgprot_writecombine(pgprot_t prot)
621 {
622 	return pgprot_noncached_wc(prot);
623 }
624 
625 #endif /* !__ASSEMBLY__ */
626 
627 #endif /*  _ASM_POWERPC_BOOK3S_32_PGTABLE_H */
628