xref: /openbmc/linux/arch/csky/mm/tlb.c (revision 3a35093a)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
#include <asm/setup.h>

/*
 * One C-SKY MMU TLB entry contains two PFN/page entries, i.e.:
 * 1 VPN -> 2 PFN
 */
#define TLB_ENTRY_SIZE		(PAGE_SIZE * 2)
#define TLB_ENTRY_SIZE_MASK	(PAGE_MASK << 1)

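/* Invalidate the entire TLB, regardless of address space or ASID. */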
void flush_tlb_all(void)
{
	tlb_invalid_all();
}

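/*
 * Flush all TLB entries belonging to @mm: with hardware TLBI support we
 * can invalidate by ASID, otherwise fall back to a full invalidate.
 */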
void flush_tlb_mm(struct mm_struct *mm)
{
#ifdef CONFIG_CPU_HAS_TLBI
	asm volatile("tlbi.asids %0"::"r"(cpu_asid(mm)));
#else
	tlb_invalid_all();
#endif
}

/*
 * The MMU operation registers can only invalidate TLB entries in the
 * jTLB, so we have to change the ASID field to invalidate the I-uTLB
 * and D-uTLB as well.
 */
#ifndef CONFIG_CPU_HAS_TLBI
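/*
 * The uTLBs are only invalidated when the ASID in entryhi actually
 * changes; if oldpid == newpid, write a different ASID first so that
 * restoring oldpid is guaranteed to change it.
 */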
#define restore_asid_inv_utlb(oldpid, newpid) \
do { \
	if (oldpid == newpid) \
		write_mmu_entryhi(oldpid + 1); \
	write_mmu_entryhi(oldpid); \
} while (0)
#endif

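/*
 * Flush the TLB entries covering the user range [start, end) in @vma's
 * address space.  The range is aligned to TLB_ENTRY_SIZE because each
 * entry maps a pair of pages.
 */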
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	unsigned long newpid = cpu_asid(vma->vm_mm);

	start &= TLB_ENTRY_SIZE_MASK;
	end   += TLB_ENTRY_SIZE - 1;
	end   &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	while (start < end) {
		asm volatile("tlbi.vas %0"::"r"(start | newpid));
		start += 2*PAGE_SIZE;
	}
	sync_is();
#else
	{
	unsigned long flags, oldpid;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	while (start < end) {
		int idx;

		write_mmu_entryhi(start | newpid);
		start += 2*PAGE_SIZE;
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();
	}
	restore_asid_inv_utlb(oldpid, newpid);
	local_irq_restore(flags);
	}
#endif
}

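/*
 * Flush the TLB entries covering the kernel range [start, end).
 * Kernel mappings are not tied to a particular mm, so no ASID is
 * passed in.
 */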
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= TLB_ENTRY_SIZE_MASK;
	end   += TLB_ENTRY_SIZE - 1;
	end   &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	while (start < end) {
		asm volatile("tlbi.vaas %0"::"r"(start));
		start += 2*PAGE_SIZE;
	}
	sync_is();
#else
	{
	unsigned long flags, oldpid;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	while (start < end) {
		int idx;

		write_mmu_entryhi(start | oldpid);
		start += 2*PAGE_SIZE;
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();
	}
	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}

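/* Flush the single TLB entry that maps @addr in @vma's address space. */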
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	int newpid = cpu_asid(vma->vm_mm);

	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	asm volatile("tlbi.vas %0"::"r"(addr | newpid));
	sync_is();
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	write_mmu_entryhi(addr | newpid);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();

	restore_asid_inv_utlb(oldpid, newpid);
	local_irq_restore(flags);
	}
#endif
}

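/*
 * Flush the single TLB entry that maps @addr without reference to a
 * particular mm.
 */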
void flush_tlb_one(unsigned long addr)
{
	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	asm volatile("tlbi.vaas %0"::"r"(addr));
	sync_is();
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	write_mmu_entryhi(addr | oldpid);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();

	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}
EXPORT_SYMBOL(flush_tlb_one);