// SPDX-License-Identifier: GPL-2.0-only
/*
 * TLB Management (flush/create/diagnostics) for MMUv3 and MMUv4
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 */

#include <linux/module.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>

/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

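/*
 * MMU build configuration for this CPU: decoded from the MMU BCR in
 * arc_mmu_mumbojumbo() and consulted by the flush/init routines below.
 */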
static struct cpuinfo_arc_mmu {
	unsigned int ver, pg_sz_k, s_pg_sz_m, pae, sets, ways;
} mmuinfo;

/*
 * Utility Routine to erase a J-TLB entry
 * Caller needs to setup Index Reg (manually or via getIndex)
 */
static inline void __tlb_entry_erase(void)
{
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

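/*
 * Invalidate the micro TLBs (uITLB/uDTLB), the small hardware caches of
 * recently used joint-TLB entries.
 */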
static void utlb_invalidate(void)
{
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
}

#ifdef CONFIG_ARC_MMU_V3

static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
	unsigned int idx;

	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	return idx;
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	idx = tlb_entry_lkup(vaddr_n_asid);

	/* No error means entry found, zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		__tlb_entry_erase();
	} else {
		/* Duplicate entry error */
		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
		     vaddr_n_asid);
	}
}

static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
{
	unsigned int idx;

	/*
	 * First verify if an entry for this vaddr+ASID already exists
	 * This also sets up PD0 (vaddr, ASID..) for the final commit
	 */
	idx = tlb_entry_lkup(pd0);

	/*
	 * If not already present, get a free slot from MMU.
	 * Otherwise, the Probe would have located the entry and set the INDEX
	 * Reg with its existing location. This will cause the Write cmd to
	 * over-write the existing entry with the new PD0 and PD1
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* setup the other half of TLB entry (pfn, rwx..) */
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	/*
	 * Commit the Entry to MMU
	 * It doesn't sound safe to use the TLBWriteNI cmd here
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#else	/* MMUv4 */

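/*
 * MMUv4 provides dedicated TLBDeleteEntry/TLBInsertEntry commands, so the
 * explicit probe and index management of the MMUv3 routines above is not
 * needed here.
 */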
static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
}

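/*
 * With PAE40 the physical address exceeds 32 bits: the low word goes into
 * PD1 and the upper bits into the PD1HI aux register.
 */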
static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
{
	write_aux_reg(ARC_REG_TLBPD0, pd0);

	if (!is_pae40_enabled()) {
		write_aux_reg(ARC_REG_TLBPD1, pd1);
	} else {
		write_aux_reg(ARC_REG_TLBPD1, pd1 & 0xFFFFFFFF);
		write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);
	}

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}

#endif

/*
 * Unconditionally (without lookup) erase the entire MMU contents
 */

noinline void local_flush_tlb_all(void)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	unsigned long flags;
	unsigned int entry;
	int num_tlb = mmu->sets * mmu->ways;

	local_irq_save(flags);

	/* Load PD0 and PD1 with template for a Blank Entry */
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);

	for (entry = 0; entry < num_tlb; entry++) {
		/* write this entry to the TLB */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
	}

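	/*
	 * Super-page (sTLB) entries live in a separate block of 16 slots at a
	 * fixed index (0x800), so blank those as well when THP is enabled.
	 */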
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
		const int stlb_idx = 0x800;

		/* Blank sTLB entry */
		write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);

		for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
			write_aux_reg(ARC_REG_TLBINDEX, entry);
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Flush the entire MM for userland. The fastest way is to move to Next ASID
 */
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Small optimisation courtesy IA64
	 * flush_mm called during fork, exit, munmap etc, multiple times as well.
	 * Only for fork() do we need to move parent to a new MMU ctxt,
	 * all other cases are NOPs, hence this check.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	/*
	 * - Move to a new ASID, but only if the mm is still wired in
	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
	 *   causing h/w - s/w ASID to get out of sync)
	 * - Also the new get_new_mmu_context() implementation allocates a new
	 *   ASID only if it is not allocated already - so unallocate first
	 */
	destroy_context(mm);
	if (current->mm == mm)
		get_new_mmu_context(mm);
}

/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive
 * Difference between this and Kernel Range Flush is
 * -Here the fastest way (if range is too large) is to move to next ASID
 *  without doing any explicit Shootdown
 * -In case of kernel Flush, entry has to be shot down explicitly
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* If range @start to @end is more than 32 TLB entries deep,
	 * it's better to move to a new ASID rather than searching for
	 * individual entries and then shooting them down
	 *
	 * The calc above is rough, doesn't account for unaligned parts,
	 * since this is heuristics based anyways
	 */
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}

	/*
	 * @start moved to page start: this alone suffices for checking
	 * loop end condition below, w/o need for aligning @end to end
	 * e.g. 2000 to 4001 will anyhow loop twice
	 */
	start &= PAGE_MASK;

	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
			start += PAGE_SIZE;
		}
	}

	local_irq_restore(flags);
}

/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* exactly same as above, except for TLB entry not taking ASID */

	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_all();
		return;
	}

	start &= PAGE_MASK;

	local_irq_save(flags);
	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	local_irq_restore(flags);
}

/*
 * Delete TLB entry in MMU for a given page (??? address)
 * NOTE One TLB entry contains translation for a single PAGE
 */

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* Note that it is critical that interrupts are DISABLED between
	 * checking the ASID and using it to flush the TLB entry
	 */
	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_SMP

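/* Argument bundle handed to the cross-CPU IPI flush helpers below */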
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ipi_flush_pmd_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}
#endif

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

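/*
 * SMP variants: broadcast the corresponding local_* flush to all CPUs
 * (or just the mm's cpumask) via IPIs.
 */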
void flush_tlb_all(void)
{
	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
			 mm, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = uaddr
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
}
#endif

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta = {
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}
#endif

/*
 * Routine to create a TLB entry
 */
static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
	unsigned long flags;
	unsigned int asid_or_sasid, rwx;
	unsigned long pd0;
	phys_addr_t pd1;

	/*
	 * create_tlb() assumes that current->mm == vma->vm_mm, since
	 * - the ASID for the TLB entry is fetched from the MMU ASID reg
	 *   (valid for curr)
	 * - it completes the lazy write to SASID reg (again valid for curr tsk)
	 *
	 * Removing the assumption involves
	 * - Using vma->vm_mm->context{ASID,SASID}, as opposed to MMU reg.
	 * - More importantly it makes this handler inconsistent with fast-path
	 *   TLB Refill handler which always deals with "current"
	 *
	 * Let's see the use cases when current->mm != vma->vm_mm and we land here
	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *     Here VM wants to pre-install a TLB entry for user stack while
	 *     current->mm still points to pre-execve mm (hence the condition).
	 *     However the stack vaddr is soon relocated (randomization) and
	 *     move_page_tables() tries to undo that TLB entry.
	 *     Thus not creating the TLB entry is not any worse.
	 *
	 *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
	 *     breakpoint in debugged task. Not creating a TLB now is not
	 *     performance critical.
	 *
	 * Neither of the cases above is worth the code churn.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	vaddr &= PAGE_MASK;

	/* update this PTE's credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB(PD0,PD1) from PTE */

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

	/*
	 * ARC MMU provides fully orthogonal access bits for K/U mode,
	 * however Linux only saves 1 set to save PTE real-estate
	 * Here we convert 3 PTE bits into 6 MMU bits:
	 * -Kernel only entries have Kr Kw Kx 0 0 0
	 * -User entries have mirrored K and U bits
	 */
	rwx = pte_val(*ptep) & PTE_BITS_RWX;

	if (pte_val(*ptep) & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */

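	/*
	 * e.g. a non-global PTE with r/w set thus becomes Kr Kw 0 Ur Uw 0,
	 * while a global (kernel) PTE keeps only the K copies.
	 */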
	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);

	tlb_entry_insert(pd0, pd1);

	local_irq_restore(flags);
}

/*
 * Called at the end of pagefault, for a userspace mapped page
 * -pre-install the corresponding TLB entry into MMU
 * -Finalize the delayed D-cache flush of kernel mapping of page due to
 *  flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long vaddr_unaligned, pte_t *ptep, unsigned int nr)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

	if (page == ZERO_PAGE(0)) {
		return;
	}

	/*
	 * Exec page : Independent of aliasing/page-color considerations,
	 *	       since icache doesn't snoop dcache on ARC, any dirty
	 *	       K-mapping of a code page needs to be wback+inv so that
	 *	       icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *	       so userspace sees the right data.
	 *	       (Avoids the flush for Non-exec + congruent mapping case)
	 */
	if ((vma->vm_flags & VM_EXEC) ||
	    addr_not_cache_congruent(paddr, vaddr)) {
		struct folio *folio = page_folio(page);
		int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
		if (dirty) {
			unsigned long offset = offset_in_folio(folio, paddr);
			nr = folio_nr_pages(folio);
			paddr -= offset;
			vaddr -= offset;
			/* wback + inv dcache lines (K-mapping) */
			__flush_dcache_pages(paddr, paddr, nr);

			/* invalidate any existing icache lines (U-mapping) */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_pages(paddr, vaddr, nr);
		}
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * MMUv4 in HS38x cores supports Super Pages which are the basis for Linux THP
 * support.
 *
 * Normal and Super pages can co-exist (of course not overlap) in TLB with a
 * new bit "SZ" in TLB page descriptor to distinguish between them.
 * Super Page size is configurable in hardware (4K to 16M), but fixed once
 * the RTL is built.
 *
 * The exact THP size a Linux configuration will support is a function of:
 * - MMU page size (typical 8K, RTL fixed)
 * - software page walker address split between PGD:PTE:PFN (typical
 *   11:8:13, but can be changed with 1 line)
 * So for the above default, the THP size supported is 8K * (2^8) = 2M
 *
 * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
 * reduces to 1 level (as PTE is folded into PGD and canonically referred
 * to as PMD).
 * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
 */

void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	pte_t pte = __pte(pmd_val(*pmd));
	update_mmu_cache_range(NULL, vma, addr, &pte, HPAGE_PMD_NR);
}

void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			       unsigned long end)
{
	unsigned int cpu;
	unsigned long flags;

	local_irq_save(flags);

	cpu = smp_processor_id();

	if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
		unsigned int asid = hw_pid(vma->vm_mm, cpu);

		/* No need to loop here: this will always be for 1 Huge Page */
		tlb_entry_erase(start | _PAGE_HW_SZ | asid);
	}

	local_irq_restore(flags);
}

#endif

/* Read the MMU Build Configuration Register, decode it and save into
 * the cpuinfo structure for later use.
 * No Validation is done here, simply read/convert the BCR
 */
int arc_mmu_mumbojumbo(int c, char *buf, int len)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	unsigned int bcr, u_dtlb, u_itlb, sasid;
	struct bcr_mmu_3 *mmu3;
	struct bcr_mmu_4 *mmu4;
	char super_pg[64] = "";
	int n = 0;

	bcr = read_aux_reg(ARC_REG_MMU_BCR);
	mmu->ver = (bcr >> 24);

	if (is_isa_arcompact() && mmu->ver == 3) {
		mmu3 = (struct bcr_mmu_3 *)&bcr;
		mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
		mmu->sets = 1 << mmu3->sets;
		mmu->ways = 1 << mmu3->ways;
		u_dtlb = mmu3->u_dtlb;
		u_itlb = mmu3->u_itlb;
		sasid = mmu3->sasid;
	} else {
		mmu4 = (struct bcr_mmu_4 *)&bcr;
		mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
		mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
		mmu->sets = 64 << mmu4->n_entry;
		mmu->ways = mmu4->n_ways * 2;
		u_dtlb = mmu4->u_dtlb * 4;
		u_itlb = mmu4->u_itlb * 4;
		sasid = mmu4->sasid;
		mmu->pae = mmu4->pae;
	}

	if (mmu->s_pg_sz_m)
		scnprintf(super_pg, 64, "/%dM%s",
			  mmu->s_pg_sz_m,
			  IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) ? " (THP enabled)":"");

	n += scnprintf(buf + n, len - n,
		       "MMU [v%x]\t: %dk%s, swalk %d lvl, JTLB %dx%d, uDTLB %d, uITLB %d%s%s%s\n",
		       mmu->ver, mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
		       mmu->sets, mmu->ways,
		       u_dtlb, u_itlb,
		       IS_AVAIL1(sasid, ", SASID"),
		       IS_AVAIL2(mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));

	return n;
}

int pae40_exist_but_not_enab(void)
{
	return mmuinfo.pae && !is_pae40_enabled();
}

void arc_mmu_init(void)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	int compat = 0;

	/*
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));

	/*
	 * stack top size sanity check,
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));

	/*
	 * Ensure that MMU features assumed by kernel exist in hardware.
	 * - For older ARC700 cpus, only v3 supported
	 * - For HS cpus, v4 was baseline and v5 is backwards compatible
	 *   (will run older software).
	 */
	if (is_isa_arcompact() && mmu->ver == 3)
		compat = 1;
	else if (is_isa_arcv2() && mmu->ver >= 4)
		compat = 1;

	if (!compat)
		panic("MMU ver %d doesn't match kernel built for\n", mmu->ver);

	if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
		panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
		      (unsigned long)TO_MB(HPAGE_PMD_SIZE));

	if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
		panic("Hardware doesn't support PAE40\n");

	/* Enable the MMU with ASID 0 */
	mmu_setup_asid(NULL, 0);

	/* cache the pgd pointer in MMU SCRATCH reg (ARCv2 only) */
	mmu_setup_pgd(NULL, swapper_pg_dir);

	if (pae40_exist_but_not_enab())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);
}

/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first.
 *		---------------------	-----------
 *		|way0|way1|way2|way3|	|way0|way1|
 *		---------------------	-----------
 * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
 * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
 *		~                   ~	~         ~
 * [set127]	| 508| 509| 510| 511|	| 254| 255|
 *		---------------------	-----------
 * For normal operations we don't (must not) care how the above works since
 * MMU cmd getIndex(vaddr) abstracts that out.
 * However for walking WAYS of a SET, we need to know this
 */
#define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))
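/* e.g. for a 4-way JTLB, (set 2, way 3) maps to linear index 2 * 4 + 3 = 11 */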

/* Handling of Duplicate PD (TLB entry) in MMU.
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 *  time of lookup matching multiple ways.
 * -Ideally these should never happen - but if they do - workaround by deleting
 *  the duplicate one.
 * -Knob to be verbose about it. (TODO: hook them up to debugfs)
 */
volatile int dup_pd_silent; /* Be silent about it or complain (default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
			  struct pt_regs *regs)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	unsigned long flags;
	int set, n_ways = mmu->ways;

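	/* pd0[] below holds at most 4 ways, hence the clamp and sanity check */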
	n_ways = min(n_ways, 4);
	BUG_ON(mmu->ways > 4);

	local_irq_save(flags);

	/* loop thru all sets of TLB */
	for (set = 0; set < mmu->sets; set++) {

		int is_valid, way;
		unsigned int pd0[4];

		/* read out all the ways of current set */
		for (way = 0, is_valid = 0; way < n_ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
				      SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			is_valid |= pd0[way] & _PAGE_PRESENT;
			pd0[way] &= PAGE_MASK;
		}

		/* If all the WAYS in SET are empty, skip to next SET */
		if (!is_valid)
			continue;

		/* Scan the set for duplicate ways: needs a nested loop */
		for (way = 0; way < n_ways - 1; way++) {

			int n;

			if (!pd0[way])
				continue;

			for (n = way + 1; n < n_ways; n++) {
				if (pd0[way] != pd0[n])
					continue;

				if (!dup_pd_silent)
					pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
						pd0[way], set, way, n);

				/*
				 * clear entry @way and not @n.
				 * This is critical to our optimised loop
				 */
				pd0[way] = 0;
				write_aux_reg(ARC_REG_TLBINDEX,
					      SET_WAY_TO_IDX(mmu, set, way));
				__tlb_entry_erase();
			}
		}
	}

	local_irq_restore(flags);
}