xref: /openbmc/linux/arch/powerpc/include/asm/tlb.h (revision e953aeaa)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  *	TLB shootdown specifics for powerpc
4  *
5  * Copyright (C) 2002 Anton Blanchard, IBM Corp.
6  * Copyright (C) 2002 Paul Mackerras, IBM Corp.
7  */
8 #ifndef _ASM_POWERPC_TLB_H
9 #define _ASM_POWERPC_TLB_H
10 #ifdef __KERNEL__
11 
12 #ifndef __powerpc64__
13 #include <linux/pgtable.h>
14 #endif
15 #ifndef __powerpc64__
16 #include <asm/page.h>
17 #include <asm/mmu.h>
18 #endif
19 
20 #include <linux/pagemap.h>
21 
22 #define tlb_start_vma(tlb, vma)	do { } while (0)
23 #define tlb_end_vma(tlb, vma)	do { } while (0)
24 #define __tlb_remove_tlb_entry	__tlb_remove_tlb_entry
25 
26 #define tlb_flush tlb_flush
27 extern void tlb_flush(struct mmu_gather *tlb);
/*
 * book3s:
 * Hash does not use the Linux page tables, so we can avoid the TLB
 * invalidate for page-table freeing; Radix, on the other hand, does use
 * the page tables and needs the TLBI.
 *
 * nohash:
 * We still do a TLB invalidate in the __pte_free_tlb routine before we
 * add the page-table pages to the mmu gather table batch.
 */
38 #define tlb_needs_table_invalidate()	radix_enabled()
39 
40 /* Get the generic bits... */
41 #include <asm-generic/tlb.h>
42 
43 extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
44 			     unsigned long address);
45 
46 static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
47 					  unsigned long address)
48 {
49 #ifdef CONFIG_PPC_BOOK3S_32
50 	if (pte_val(*ptep) & _PAGE_HASHPTE)
51 		flush_hash_entry(tlb->mm, ptep, address);
52 #endif
53 }
54 
55 #ifdef CONFIG_SMP
/*
 * Return non-zero when every CPU that has used @mm is a hardware thread
 * of the core we are currently running on, i.e. mm_cpumask(mm) is a
 * subset of the current CPU's sibling mask.
 */
static inline int mm_is_core_local(struct mm_struct *mm)
{
	const struct cpumask *core_siblings;

	core_siblings = topology_sibling_cpumask(smp_processor_id());

	return cpumask_subset(mm_cpumask(mm), core_siblings);
}
61 
62 #ifdef CONFIG_PPC_BOOK3S_64
63 static inline int mm_is_thread_local(struct mm_struct *mm)
64 {
65 	if (atomic_read(&mm->context.active_cpus) > 1)
66 		return false;
67 	return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
68 }
/*
 * Shrink the mm's CPU footprint back down to just the current CPU:
 * reset the active-CPU count to 1 and rebuild mm_cpumask(mm) to contain
 * only this CPU.  This function performs no TLB flushing itself;
 * NOTE(review): callers presumably invalidate the stale translations on
 * the CPUs being dropped from the mask -- confirm at the call sites.
 *
 * Must be called from a thread of @mm (checked below), and not while a
 * coprocessor context is attached (checked below), since the mask reset
 * is not safe under an active copro.
 */
static inline void mm_reset_thread_local(struct mm_struct *mm)
{
	WARN_ON(atomic_read(&mm->context.copros) > 0);
	/*
	 * It's possible for mm_access to take a reference on mm_users to
	 * access the remote mm from another thread, but it's not allowed
	 * to set mm_cpumask, so mm_users may be > 1 here.
	 */
	WARN_ON(current->mm != mm);
	atomic_set(&mm->context.active_cpus, 1);
	cpumask_clear(mm_cpumask(mm));
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}
82 #else /* CONFIG_PPC_BOOK3S_64 */
/*
 * Without an active-CPU counter, decide thread-locality by comparing the
 * mm's cpumask against a mask containing only the current CPU.
 */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	const struct cpumask *self = cpumask_of(smp_processor_id());

	return cpumask_equal(mm_cpumask(mm), self);
}
88 #endif /* !CONFIG_PPC_BOOK3S_64 */
89 
90 #else /* CONFIG_SMP */
/* !SMP: only one CPU exists, so any mm is trivially core-local. */
static inline int mm_is_core_local(struct mm_struct *mm)
{
	return 1;
}
95 
/* !SMP: only one CPU exists, so any mm is trivially thread-local. */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	return 1;
}
100 #endif
101 
102 #endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TLB_H */
104