xref: /openbmc/linux/arch/powerpc/include/asm/tlb.h (revision a8c5cb99)
1 /*
2  *	TLB shootdown specifics for powerpc
3  *
4  * Copyright (C) 2002 Anton Blanchard, IBM Corp.
5  * Copyright (C) 2002 Paul Mackerras, IBM Corp.
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License
9  * as published by the Free Software Foundation; either version
10  * 2 of the License, or (at your option) any later version.
11  */
12 #ifndef _ASM_POWERPC_TLB_H
13 #define _ASM_POWERPC_TLB_H
14 #ifdef __KERNEL__
15 
16 #ifndef __powerpc64__
17 #include <asm/pgtable.h>
18 #endif
19 #include <asm/pgalloc.h>
20 #include <asm/tlbflush.h>
21 #ifndef __powerpc64__
22 #include <asm/page.h>
23 #include <asm/mmu.h>
24 #endif
25 
26 #include <linux/pagemap.h>
27 
28 #define tlb_start_vma(tlb, vma)	do { } while (0)
29 #define tlb_end_vma(tlb, vma)	do { } while (0)
30 #define __tlb_remove_tlb_entry	__tlb_remove_tlb_entry
31 #define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
32 
33 extern void tlb_flush(struct mmu_gather *tlb);
34 
35 /* Get the generic bits... */
36 #include <asm-generic/tlb.h>
37 
38 extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
39 			     unsigned long address);
40 
41 static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
42 					  unsigned long address)
43 {
44 #ifdef CONFIG_PPC_STD_MMU_32
45 	if (pte_val(*ptep) & _PAGE_HASHPTE)
46 		flush_hash_entry(tlb->mm, ptep, address);
47 #endif
48 }
49 
50 static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
51 						     unsigned int page_size)
52 {
53 	if (!tlb->page_size)
54 		tlb->page_size = page_size;
55 	else if (tlb->page_size != page_size) {
56 		tlb_flush_mmu(tlb);
57 		/*
58 		 * update the page size after flush for the new
59 		 * mmu_gather.
60 		 */
61 		tlb->page_size = page_size;
62 	}
63 }
64 
65 #ifdef CONFIG_SMP
/*
 * mm_is_core_local - has @mm only ever run on CPUs of the current core?
 *
 * True when every CPU recorded in mm_cpumask(@mm) is an SMT sibling of
 * the current CPU, so a core-scoped TLB flush suffices.
 */
static inline int mm_is_core_local(struct mm_struct *mm)
{
	const struct cpumask *core_siblings;

	core_siblings = topology_sibling_cpumask(smp_processor_id());
	return cpumask_subset(mm_cpumask(mm), core_siblings);
}
71 
72 #ifdef CONFIG_PPC_BOOK3S_64
73 static inline int mm_is_thread_local(struct mm_struct *mm)
74 {
75 	if (atomic_read(&mm->context.active_cpus) > 1)
76 		return false;
77 	return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
78 }
/*
 * mm_reset_thread_local - shrink @mm's recorded usage to the current CPU
 *
 * Resets the active CPU count to one and rebuilds mm_cpumask to contain
 * only the current CPU.  NOTE(review): the WARN_ON checks below suggest
 * callers must guarantee no coprocessor users and that @mm is the
 * caller's own mm — confirm against the call sites.
 */
static inline void mm_reset_thread_local(struct mm_struct *mm)
{
	/* Coprocessor users would still depend on the old mask/count. */
	WARN_ON(atomic_read(&mm->context.copros) > 0);
	/*
	 * It's possible for mm_access to take a reference on mm_users to
	 * access the remote mm from another thread, but it's not allowed
	 * to set mm_cpumask, so mm_users may be > 1 here.
	 */
	WARN_ON(current->mm != mm);
	atomic_set(&mm->context.active_cpus, 1);
	/* Empty the mask first, then re-add the sole remaining user. */
	cpumask_clear(mm_cpumask(mm));
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}
92 #else /* CONFIG_PPC_BOOK3S_64 */
/*
 * mm_is_thread_local - is the current CPU the only user of @mm?
 *
 * Generic SMP variant: local iff mm_cpumask contains exactly the
 * current CPU and nothing else.
 */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	const struct cpumask *self;

	self = cpumask_of(smp_processor_id());
	return cpumask_equal(mm_cpumask(mm), self);
}
98 #endif /* !CONFIG_PPC_BOOK3S_64 */
99 
100 #else /* CONFIG_SMP */
/* !CONFIG_SMP: with a single CPU, every mm is trivially core-local. */
static inline int mm_is_core_local(struct mm_struct *mm)
{
	return 1;
}
105 
/* !CONFIG_SMP: with a single CPU, every mm is trivially thread-local. */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	return 1;
}
110 #endif
111 
112 #endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TLB_H */
114