xref: /openbmc/linux/arch/csky/mm/tlb.c (revision 9b93eb47)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/setup.h>

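/*
 * Number of jTLB entries; CONFIG_CPU_TLB_SIZE comes from Kconfig and
 * reflects the particular C-SKY core's TLB geometry.
 */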
#define CSKY_TLB_SIZE CONFIG_CPU_TLB_SIZE

void flush_tlb_all(void)
{
	tlb_invalid_all();
}

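/*
 * Flush all user entries for @mm on this CPU. If the mm holds a live
 * ASID here, its MMU context is dropped so a fresh ASID gets allocated;
 * the TLB itself is then invalidated wholesale.
 */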
void flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0)
		drop_mmu_context(mm, cpu);

	tlb_invalid_all();
}

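/*
 * Restore the saved entryhi value after a probe loop. When the saved
 * ASID equals the one just used for probing, a dummy entryhi value is
 * written first; judging by the macro's name, the extra change is what
 * forces the micro-TLB (uTLB) to invalidate.
 */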
#define restore_asid_inv_utlb(oldpid, newpid) \
do { \
	if (((oldpid) & ASID_MASK) == (newpid)) \
		write_mmu_entryhi((oldpid) + 1); \
	write_mmu_entryhi(oldpid); \
} while (0)

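/*
 * Flush a user address range. Each jTLB entry maps an even/odd pair of
 * pages (see entrylo0/entrylo1 in show_jtlb_table()), so the range is
 * counted and aligned in two-page units; anything larger than half the
 * TLB is handled more cheaply by dropping the whole context.
 */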
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;
		int newpid = cpu_asid(cpu, mm);

		local_irq_save(flags);
		/* Round up to whole pages, then to even/odd page pairs. */
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= CSKY_TLB_SIZE / 2) {
			/* Align the range to a page-pair boundary. */
			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
#ifdef CONFIG_CPU_HAS_TLBI
			while (start < end) {
				asm volatile("tlbi.vaas %0"
					     ::"r"(start | newpid));
				start += (PAGE_SIZE << 1);
			}
			sync_is();
#else
			{
				int oldpid = read_mmu_entryhi();

				while (start < end) {
					int idx;

					write_mmu_entryhi(start | newpid);
					start += (PAGE_SIZE << 1);
					tlb_probe();
					idx = read_mmu_index();
					if (idx >= 0)
						tlb_invalid_indexed();
				}
				restore_asid_inv_utlb(oldpid, newpid);
			}
#endif
		} else {
			/* Too large to probe entry by entry. */
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}

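/*
 * Flush kernel mappings: the same probe walk as flush_tlb_range(), but
 * with no ASID ORed into entryhi, since kernel entries are global.
 */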
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size <= CSKY_TLB_SIZE) {
		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
#ifdef CONFIG_CPU_HAS_TLBI
		while (start < end) {
			asm volatile("tlbi.vaas %0"::"r"(start));
			start += (PAGE_SIZE << 1);
		}
		sync_is();
#else
		{
			int oldpid = read_mmu_entryhi();

			while (start < end) {
				int idx;

				write_mmu_entryhi(start);
				start += (PAGE_SIZE << 1);
				tlb_probe();
				idx = read_mmu_index();
				if (idx >= 0)
					tlb_invalid_indexed();
			}
			restore_asid_inv_utlb(oldpid, 0);
		}
#endif
	} else {
		flush_tlb_all();
	}

	local_irq_restore(flags);
}

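/*
 * Flush the single page-pair mapping covering @page in @vma's mm.
 */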
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	int newpid;

	/* vma is dereferenced below, so it must be checked first. */
	if (!vma || cpu_context(cpu, vma->vm_mm) == 0)
		return;

	newpid = cpu_asid(cpu, vma->vm_mm);
	page &= (PAGE_MASK << 1);

#ifdef CONFIG_CPU_HAS_TLBI
	asm volatile("tlbi.vaas %0"::"r"(page | newpid));
	sync_is();
#else
	{
		int oldpid, idx;
		unsigned long flags;

		local_irq_save(flags);
		oldpid = read_mmu_entryhi();
		write_mmu_entryhi(page | newpid);
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();

		restore_asid_inv_utlb(oldpid, newpid);
		local_irq_restore(flags);
	}
#endif
}

/*
 * Remove one kernel space TLB entry.  This entry is assumed to be marked
 * global so we don't do the ASID thing.
 */
void flush_tlb_one(unsigned long page)
{
	int oldpid;

	oldpid = read_mmu_entryhi();
	page &= (PAGE_MASK << 1);

#ifdef CONFIG_CPU_HAS_TLBI
	page = page | (oldpid & 0xfff);
	asm volatile("tlbi.vaas %0"::"r"(page));
	sync_is();
#else
	{
		int idx;
		unsigned long flags;

		page = page | (oldpid & 0xff);

		local_irq_save(flags);
		write_mmu_entryhi(page);
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();
		restore_asid_inv_utlb(oldpid, oldpid);
		local_irq_restore(flags);
	}
#endif
}
EXPORT_SYMBOL(flush_tlb_one);

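/*
 * Usage sketch (hypothetical caller, not from this file): after a kernel
 * PTE is rewritten, the stale translation would be dropped with a
 * single-entry flush:
 *
 *	set_pte(ptep, pfn_pte(pfn, PAGE_KERNEL));
 *	flush_tlb_one(vaddr);
 */
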
/* Dump the current jTLB contents (CSKY_TLB_SIZE entries). */
void show_jtlb_table(void)
{
	unsigned long flags;
	int entryhi, entrylo0, entrylo1;
	int entry;
	int oldpid;

	local_irq_save(flags);
	entry = 0;
	pr_info("\n\n\n");

	oldpid = read_mmu_entryhi();
	while (entry < CSKY_TLB_SIZE) {
		write_mmu_index(entry);
		tlb_read();
		entryhi  = read_mmu_entryhi();
		entrylo0 = read_mmu_entrylo0();
		entrylo1 = read_mmu_entrylo1();
		pr_info("jtlb[%d]:	entryhi - 0x%x;	entrylo0 - 0x%x;	entrylo1 - 0x%x\n",
			entry, entryhi, entrylo0, entrylo1);
		entry++;
	}
	write_mmu_entryhi(oldpid);
	local_irq_restore(flags);
}