xref: /openbmc/linux/arch/riscv/mm/tlbflush.c (revision 864a0242)
// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>

/*
 * Flush entire TLB if number of entries to be flushed is greater
 * than the threshold below.
 */
static unsigned long tlb_flush_all_threshold __read_mostly = 64;

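/*
 * Flush a range of @size bytes, @stride bytes at a time, for @asid on the
 * local hart; fall back to a full per-ASID flush once the number of
 * entries exceeds tlb_flush_all_threshold.
 */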
static void local_flush_tlb_range_threshold_asid(unsigned long start,
						 unsigned long size,
						 unsigned long stride,
						 unsigned long asid)
{
	unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
	int i;

	if (nr_ptes_in_range > tlb_flush_all_threshold) {
		local_flush_tlb_all_asid(asid);
		return;
	}

	for (i = 0; i < nr_ptes_in_range; ++i) {
		local_flush_tlb_page_asid(start, asid);
		start += stride;
	}
}

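/*
 * Local (non-broadcast) range flush: a single-page flush for ranges that
 * fit in one stride, a full per-ASID flush for FLUSH_TLB_MAX_SIZE, and a
 * threshold-based walk for everything in between.
 */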
static inline void local_flush_tlb_range_asid(unsigned long start,
		unsigned long size, unsigned long stride, unsigned long asid)
{
	if (size <= stride)
		local_flush_tlb_page_asid(start, asid);
	else if (size == FLUSH_TLB_MAX_SIZE)
		local_flush_tlb_all_asid(asid);
	else
		local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}

/* Flush a range of kernel pages without broadcasting */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
}

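/* IPI callback: flush the whole TLB on the local hart. */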
static void __ipi_flush_tlb_all(void *info)
{
	local_flush_tlb_all();
}

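/*
 * Flush the TLB on all online harts, either by sending IPIs or by asking
 * the SBI firmware to issue the remote fences.
 */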
void flush_tlb_all(void)
{
	if (riscv_use_ipi_for_rfence())
		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
	else
		sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
}

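/* Arguments handed to the range-flush IPI callback. */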
struct flush_tlb_range_data {
	unsigned long asid;
	unsigned long start;
	unsigned long size;
	unsigned long stride;
};

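/* IPI callback: flush the requested range for the requested ASID locally. */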
static void __ipi_flush_tlb_range_asid(void *info)
{
	struct flush_tlb_range_data *d = info;

	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}

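/*
 * Common helper for the flush_tlb_*() variants: pick the CPUs and ASID to
 * target, then flush locally, by IPI, or through the SBI depending on
 * whether other harts are running @mm.
 */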
static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
			      unsigned long size, unsigned long stride)
{
	struct flush_tlb_range_data ftd;
	const struct cpumask *cmask;
	unsigned long asid = FLUSH_TLB_NO_ASID;
	bool broadcast;

	if (mm) {
		unsigned int cpuid;

		cmask = mm_cpumask(mm);
		if (cpumask_empty(cmask))
			return;

		cpuid = get_cpu();
		/* check if the tlbflush needs to be sent to other CPUs */
		broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;

		if (static_branch_unlikely(&use_asid_allocator))
			asid = atomic_long_read(&mm->context.id) & asid_mask;
	} else {
		cmask = cpu_online_mask;
		broadcast = true;
	}

	if (broadcast) {
		if (riscv_use_ipi_for_rfence()) {
			ftd.asid = asid;
			ftd.start = start;
			ftd.size = size;
			ftd.stride = stride;
			on_each_cpu_mask(cmask,
					 __ipi_flush_tlb_range_asid,
					 &ftd, 1);
		} else
			sbi_remote_sfence_vma_asid(cmask,
						   start, size, asid);
	} else {
		local_flush_tlb_range_asid(start, size, stride, asid);
	}

	if (mm)
		put_cpu();
}

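/* Flush the entire address space of @mm. */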
void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}

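/* Flush a range of @mm using @page_size as the flush stride. */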
void flush_tlb_mm_range(struct mm_struct *mm,
			unsigned long start, unsigned long end,
			unsigned int page_size)
{
	__flush_tlb_range(mm, start, end - start, page_size);
}

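/* Flush a single page of @vma's address space. */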
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
}

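/* Flush a range of user pages belonging to @vma. */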
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	__flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE);
}

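/* Flush a range of kernel pages on all harts. */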
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	__flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
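/* Flush a range mapped with PMD-sized (huge page) entries. */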
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
}
#endif