#ifndef _SPARC64_TLBFLUSH_H
#define _SPARC64_TLBFLUSH_H

#include <asm/mmu_context.h>

/* TSB flush operations. */

#define TLB_BATCH_NR	192

struct tlb_batch {
	bool huge;			/* batched vaddrs are huge-page mappings? */
	struct mm_struct *mm;		/* address space being flushed */
	unsigned long tlb_nr;		/* number of queued vaddrs */
	unsigned long active;		/* nonzero while lazy MMU mode batches */
	unsigned long vaddrs[TLB_BATCH_NR]; /* virtual addresses pending flush */
};
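
/*
 * A tlb_batch accumulates up to TLB_BATCH_NR user virtual addresses
 * while a lazy MMU section is active and is drained in one shot by
 * flush_tlb_pending().  As a rough sketch only (not the actual drain
 * code, which lives in arch/sparc/mm/tlb.c and tsb.c), consuming a
 * batch through the per-page interface below would look like:
 *
 *	unsigned long i;
 *
 *	for (i = 0; i < tb->tlb_nr; i++)
 *		flush_tsb_user_page(tb->mm, tb->vaddrs[i], tb->huge);
 *	tb->tlb_nr = 0;
 */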

void flush_tsb_kernel_range(unsigned long start, unsigned long end);
void flush_tsb_user(struct tlb_batch *tb);
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge);
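
/*
 * Note: the TSB (Translation Storage Buffer) is sparc64's in-memory
 * cache of translations that the TLB miss handlers probe before
 * falling back to a page table walk.  Tearing down a mapping thus
 * means invalidating the TSB entry as well as the TLB entry, which
 * is why TSB and TLB flush operations come in matching pairs here.
 */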

/* TLB flush operations. */

static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}
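
/*
 * The three hooks above are intentionally empty: on sparc64, user
 * TLB flushes are queued into the per-cpu tlb_batch as page table
 * entries are updated, and the queued addresses are flushed when the
 * batch fills or when arch_leave_lazy_mmu_mode()/flush_tlb_pending()
 * runs, so the generic per-mm/page/range hooks have nothing to do.
 */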

void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

void flush_tlb_pending(void);
void arch_enter_lazy_mmu_mode(void);
void arch_leave_lazy_mmu_mode(void);
#define arch_flush_lazy_mmu_mode()      do {} while (0)
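
/*
 * Lazy MMU mode brackets a run of page table updates so that their
 * TLB flushes can be queued into the per-cpu tlb_batch instead of
 * being issued one at a time.  A minimal sketch of the calling
 * pattern (normally driven by the generic mm code, not open-coded):
 *
 *	arch_enter_lazy_mmu_mode();
 *	... set_pte_at() etc. queue addresses into the batch ...
 *	arch_leave_lazy_mmu_mode();	// drains via flush_tlb_pending()
 */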

/* Local cpu only.  */
void __flush_tlb_all(void);
void __flush_tlb_page(unsigned long context, unsigned long vaddr);
void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
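
/*
 * The double-underscore primitives above touch only the calling
 * cpu's TLB.  __flush_tlb_page() takes the hardware context number,
 * which callers derive from the mm via CTX_HWBITS(mm->context), as
 * global_flush_tlb_page() does below for the !SMP case.
 */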

#ifndef CONFIG_SMP

static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
}

#else /* CONFIG_SMP */

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);

#define global_flush_tlb_page(mm, vaddr) \
	smp_flush_tlb_page(mm, vaddr)
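
/*
 * On SMP the flush must reach every cpu that may cache the stale
 * translation, so global_flush_tlb_page() expands to the cross-call
 * variant; on UP the local __flush_tlb_page() above suffices because
 * no other cpu exists to hold the entry.
 */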

#endif /* ! CONFIG_SMP */

#endif /* _SPARC64_TLBFLUSH_H */