xref: /openbmc/linux/arch/powerpc/include/asm/tlb.h (revision b627b4ed)
/*
 *	TLB shootdown specifics for powerpc
 *
 * Copyright (C) 2002 Anton Blanchard, IBM Corp.
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_POWERPC_TLB_H
#define _ASM_POWERPC_TLB_H
#ifdef __KERNEL__

#ifndef __powerpc64__
#include <asm/pgtable.h>
#endif
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#ifndef __powerpc64__
#include <asm/page.h>
#include <asm/mmu.h>
#endif

#include <linux/pagemap.h>

struct mmu_gather;

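/*
 * The per-VMA mmu_gather hooks are no-ops on powerpc: TLB invalidation
 * is driven either by tlb_flush() below or, on 32-bit hash MMUs, per
 * PTE via __tlb_remove_tlb_entry(), so nothing needs to happen at VMA
 * boundaries.
 */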
#define tlb_start_vma(tlb, vma)	do { } while (0)
#define tlb_end_vma(tlb, vma)	do { } while (0)

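/*
 * tlb_flush() comes in three flavours, selected by the #if ladder below:
 * non-hash MMUs simply flush the whole mm; the 64-bit hash MMU flushes
 * any pending invalidation batch and finishes deferred page-table frees;
 * the 32-bit hash MMU uses an out-of-line implementation in the mm code.
 */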
#if !defined(CONFIG_PPC_STD_MMU)

#define tlb_flush(tlb)			flush_tlb_mm((tlb)->mm)

#elif defined(__powerpc64__)

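/*
 * pte_free_finish() is provided by the powerpc mm code; the assumption
 * here is that it completes the batched (deferred) freeing of page-table
 * pages queued on this CPU.  tlb_flush() below only calls it after any
 * pending TLB batch has been pushed out.
 */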
extern void pte_free_finish(void);

static inline void tlb_flush(struct mmu_gather *tlb)
{
	struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch);

	/* If there's a TLB batch pending, then we must flush it because the
	 * pages are going to be freed and we really don't want to have a CPU
	 * access a freed page because it has a stale TLB entry.
	 */
	if (tlbbatch->index)
		__flush_tlb_pending(tlbbatch);

	pte_free_finish();
}

#else

extern void tlb_flush(struct mmu_gather *tlb);

#endif

/* Get the generic bits... */
#include <asm-generic/tlb.h>
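
/*
 * Rough sketch of how the generic mmu_gather code in asm-generic/tlb.h
 * is expected to reach the hooks in this file (illustrative only; exact
 * signatures vary between kernel versions):
 *
 *	struct mmu_gather *tlb = tlb_gather_mmu(mm, full_mm_flush);
 *	... tear down PTEs, each one passing through tlb_remove_tlb_entry(),
 *	    which invokes __tlb_remove_tlb_entry() below ...
 *	tlb_finish_mmu(tlb, start, end);	eventually calls tlb_flush()
 *
 * so any batched hash invalidations are pushed out before the freed
 * pages can be handed back to the allocator.
 */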
#if !defined(CONFIG_PPC_STD_MMU) || defined(__powerpc64__)

#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)

#else
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

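/*
 * On 32-bit hash MMUs a Linux PTE may be shadowed by a hash-table entry;
 * _PAGE_HASHPTE records that such an entry exists.  When the generic
 * code tears a PTE down we flush the corresponding hash entry here, so a
 * stale translation cannot let the CPU reach a page that is about to be
 * freed.
 */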
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					unsigned long address)
{
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(tlb->mm, ptep, address);
}

#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TLB_H */