/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	TLB shootdown specifics for powerpc
 *
 * Copyright (C) 2002 Anton Blanchard, IBM Corp.
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 */
#ifndef _ASM_POWERPC_TLB_H
#define _ASM_POWERPC_TLB_H
#ifdef __KERNEL__

#ifndef __powerpc64__
#include <asm/pgtable.h>
#endif
#include <asm/pgalloc.h>
#ifndef __powerpc64__
#include <asm/page.h>
#include <asm/mmu.h>
#endif

#include <linux/pagemap.h>

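/*
 * Hooks for the generic mmu_gather code in <asm-generic/tlb.h>:
 * tlb_start_vma()/tlb_end_vma() are no-ops because powerpc does not need a
 * per-VMA flush; flushing is handled by tlb_flush() and, for 32-bit hash,
 * by the __tlb_remove_tlb_entry() hook further down.
 */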
#define tlb_start_vma(tlb, vma)	do { } while (0)
#define tlb_end_vma(tlb, vma)	do { } while (0)
#define __tlb_remove_tlb_entry	__tlb_remove_tlb_entry

#define tlb_flush tlb_flush
extern void tlb_flush(struct mmu_gather *tlb);
/*
 * book3s:
 * Hash does not use the linux page tables, so we can avoid the TLB
 * invalidate for page-table freeing; Radix, on the other hand, does use
 * the page tables and needs the TLBI.
 *
 * nohash:
 * We still do a TLB invalidate in the __pte_free_tlb routine before we
 * add the page-table pages to the mmu_gather table batch.
 */
#define tlb_needs_table_invalidate()	radix_enabled()
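/*
 * Rough sketch of how the generic code is expected to consult this hook
 * when freeing page-table pages (see mm/mmu_gather.c; simplified, not
 * verbatim):
 *
 *	static void tlb_table_invalidate(struct mmu_gather *tlb)
 *	{
 *		if (tlb_needs_table_invalidate())
 *			tlb_flush_mmu_tlbonly(tlb);
 *	}
 *
 * So with radix_enabled() the page-walk cache is invalidated before the
 * table pages are handed back; hash skips that step.
 */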

/* Get the generic bits... */
#include <asm-generic/tlb.h>

extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

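/*
 * Called by the generic code for each PTE unmapped while the mmu_gather is
 * active.  On 32-bit hash MMUs (Book3S_32) the hardware hash table may
 * still hold an entry for this translation (_PAGE_HASHPTE), so evict it
 * with flush_hash_entry() before the linux PTE goes away.
 */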
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
#ifdef CONFIG_PPC_BOOK3S_32
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(tlb->mm, ptep, address);
#endif
}

#ifdef CONFIG_SMP
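/*
 * True when the mm has only ever been used by hardware threads of the
 * current core, i.e. mm_cpumask() is contained in our SMT sibling mask,
 * so a core-local TLB flush is enough.
 */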
static inline int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_sibling_cpumask(smp_processor_id()));
}

#ifdef CONFIG_PPC_BOOK3S_64
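/*
 * Book3S_64 keeps an explicit active_cpus count in the context, so
 * "thread local" means no other CPU is actively using the mm and the
 * current CPU is present in mm_cpumask().
 */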
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	if (atomic_read(&mm->context.active_cpus) > 1)
		return false;
	return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
}
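/*
 * Reset the mm to "current CPU only": active_cpus goes back to 1 and
 * mm_cpumask() is rebuilt with just this CPU, so later flushes can be done
 * locally again.  Must be called from a thread of the mm itself and not
 * while a coprocessor holds a context reference, hence the WARN_ONs.
 */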
static inline void mm_reset_thread_local(struct mm_struct *mm)
{
	WARN_ON(atomic_read(&mm->context.copros) > 0);
	/*
	 * It's possible for mm_access to take a reference on mm_users to
	 * access the remote mm from another thread, but it's not allowed
	 * to set mm_cpumask, so mm_users may be > 1 here.
	 */
	WARN_ON(current->mm != mm);
	atomic_set(&mm->context.active_cpus, 1);
	cpumask_clear(mm_cpumask(mm));
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}
#else /* CONFIG_PPC_BOOK3S_64 */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	return cpumask_equal(mm_cpumask(mm),
			      cpumask_of(smp_processor_id()));
}
#endif /* !CONFIG_PPC_BOOK3S_64 */

#else /* CONFIG_SMP */
static inline int mm_is_core_local(struct mm_struct *mm)
{
	return 1;
}

static inline int mm_is_thread_local(struct mm_struct *mm)
{
	return 1;
}
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TLB_H */