xref: /openbmc/linux/arch/arc/mm/tlb.c (revision ac4cfacc)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * TLB Management (flush/create/diagnostics) for MMUv3 and MMUv4
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 */

#include <linux/module.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>

/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

static struct cpuinfo_arc_mmu {
	unsigned int ver, pg_sz_k, s_pg_sz_m, pae, sets, ways;
} mmuinfo;

/*
 * Utility Routine to erase a J-TLB entry
 * Caller needs to setup Index Reg (manually or via getIndex)
 */
static inline void __tlb_entry_erase(void)
{
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

static void utlb_invalidate(void)
{
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
}

#ifdef CONFIG_ARC_MMU_V3

static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
	unsigned int idx;

	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

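	/*
	 * A failed probe is reported via the Index reg (TLB_LKUP_ERR set),
	 * which the callers of this helper check.
	 */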
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	return idx;
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	idx = tlb_entry_lkup(vaddr_n_asid);

	/* No error means entry found, zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		__tlb_entry_erase();
	} else {
		/* Duplicate entry error */
		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
					   vaddr_n_asid);
	}
}

static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
{
	unsigned int idx;

	/*
	 * First verify if entry for this vaddr+ASID already exists
	 * This also sets up PD0 (vaddr, ASID..) for final commit
	 */
	idx = tlb_entry_lkup(pd0);

	/*
	 * If not already present, get a free slot from MMU.
	 * Otherwise, Probe would have located the entry and set INDEX Reg
	 * with existing location. This will cause Write CMD to over-write
	 * existing entry with new PD0 and PD1
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* setup the other half of TLB entry (pfn, rwx..) */
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	/*
	 * Commit the Entry to MMU
	 * It doesn't sound safe to use the TLBWriteNI cmd here
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#else	/* MMUv4 */

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
}

static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
{
	write_aux_reg(ARC_REG_TLBPD0, pd0);

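	/*
	 * With PAE40 the physical address in pd1 is wider than 32 bits, so it
	 * is split across PD1 (low 32 bits) and PD1HI (remaining high bits).
	 */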
	if (!is_pae40_enabled()) {
		write_aux_reg(ARC_REG_TLBPD1, pd1);
	} else {
		write_aux_reg(ARC_REG_TLBPD1, pd1 & 0xFFFFFFFF);
		write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);
	}

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}

#endif

/*
 * Unconditionally (without lookup) erase the entire MMU contents
 */

noinline void local_flush_tlb_all(void)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	unsigned long flags;
	unsigned int entry;
	int num_tlb = mmu->sets * mmu->ways;

	local_irq_save(flags);

	/* Load PD0 and PD1 with template for a Blank Entry */
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);

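	/*
	 * TLBWriteNI skips the per-entry uTLB flush; the single
	 * utlb_invalidate() at the end of this routine covers that.
	 */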
	for (entry = 0; entry < num_tlb; entry++) {
		/* write this entry to the TLB */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
		const int stlb_idx = 0x800;

		/* Blank sTLB entry */
		write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);

		for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
			write_aux_reg(ARC_REG_TLBINDEX, entry);
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Flush the entire MM for userland. The fastest way is to move to Next ASID
 */
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Small optimisation courtesy IA64
	 * flush_mm is called during fork, exit, munmap etc., multiple times too.
	 * Only for fork( ) do we need to move parent to a new MMU ctxt,
	 * all other cases are NOPs, hence this check.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	/*
	 * - Move to a new ASID, but only if the mm is still wired in
	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
	 *    causing h/w - s/w ASID to get out of sync)
	 * - Also get_new_mmu_context() new implementation allocates a new
	 *   ASID only if it is not allocated already - so deallocate first
	 */
	destroy_context(mm);
	if (current->mm == mm)
		get_new_mmu_context(mm);
}

/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive
 * Difference between this and Kernel Range Flush is
 *  -Here the fastest way (if range is too large) is to move to next ASID
 *      without doing any explicit Shootdown
 *  -In case of kernel Flush, entry has to be shot down explicitly
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* If range @start to @end is more than 32 TLB entries deep,
	 * it's better to move to a new ASID rather than searching for
	 * individual entries and then shooting them down
	 *
	 * The calc above is rough, doesn't account for unaligned parts,
	 * since this is heuristics based anyways
	 */
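	/* e.g. with the typical 8K MMU page size the cutoff works out to 256K */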
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}

	/*
	 * @start moved to page start: this alone suffices for checking
	 * loop end condition below, w/o need for aligning @end to end
	 * e.g. 2000 to 4001 will anyhow loop twice
	 */
	start &= PAGE_MASK;

	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
			start += PAGE_SIZE;
		}
	}

	local_irq_restore(flags);
}

/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* exactly same as above, except for TLB entry not taking ASID */

	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_all();
		return;
	}

	start &= PAGE_MASK;

	local_irq_save(flags);
	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	local_irq_restore(flags);
}

/*
 * Delete TLB entry in MMU for a given page (user virtual address)
 * NOTE One TLB entry contains translation for single PAGE
 */

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* Note that it is critical that interrupts are DISABLED between
	 * checking the ASID and using it to flush the TLB entry
	 */
	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_SMP

struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ipi_flush_pmd_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}
#endif

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
			 mm, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = uaddr
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
}
#endif

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta = {
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}
#endif

/*
 * Routine to create a TLB entry
 */
static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
	unsigned long flags;
	unsigned int asid_or_sasid, rwx;
	unsigned long pd0;
	phys_addr_t pd1;

	/*
	 * create_tlb() assumes that current->mm == vma->mm, since
	 * -the ASID for the TLB entry is fetched from MMU ASID reg (valid for curr)
	 * -it completes the lazy write to SASID reg (again valid for curr tsk)
	 *
	 * Removing the assumption involves
	 * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
	 * -More importantly it makes this handler inconsistent with fast-path
	 *  TLB Refill handler which always deals with "current"
	 *
	 * Let's see the use cases when current->mm != vma->mm and we land here
	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *     Here VM wants to pre-install a TLB entry for user stack while
	 *     current->mm still points to pre-execve mm (hence the condition).
	 *     However the stack vaddr is soon relocated (randomization) and
	 *     move_page_tables() tries to undo that TLB entry.
	 *     Thus not creating TLB entry is not any worse.
	 *
	 *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
	 *     breakpoint in debugged task. Not creating a TLB now is not
	 *     performance critical.
	 *
	 * Both the cases above are not good enough for code churn.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	vaddr &= PAGE_MASK;

	/* update this PTE credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB(PD0,PD1) from PTE */

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

	/*
	 * ARC MMU provides fully orthogonal access bits for K/U mode,
	 * however Linux only saves 1 set to save PTE real-estate
	 * Here we convert 3 PTE bits into 6 MMU bits:
	 * -Kernel only entries have Kr Kw Kx 0 0 0
	 * -User entries have mirrored K and U bits
	 */
	rwx = pte_val(*ptep) & PTE_BITS_RWX;

	if (pte_val(*ptep) & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */
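	/*
	 * e.g. a user (non _PAGE_GLOBAL) page with r+w in its PTE thus ends up
	 * with Kr Kw Ur Uw in PD1, while a kernel/global page gets only Kr Kw.
	 */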

	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);

	tlb_entry_insert(pd0, pd1);

	local_irq_restore(flags);
}

/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of kernel mapping of page due to
 *	flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long vaddr_unaligned, pte_t *ptep, unsigned int nr)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

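	/* the shared zero page never needs the cache maintenance done below */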
	if (page == ZERO_PAGE(0)) {
		return;
	}

	/*
	 * Exec page : Independent of aliasing/page-color considerations,
	 *	       since icache doesn't snoop dcache on ARC, any dirty
	 *	       K-mapping of a code page needs to be wback+inv so that
	 *	       icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *	       so userspace sees the right data.
	 *  (Avoids the flush for Non-exec + congruent mapping case)
	 */
	if ((vma->vm_flags & VM_EXEC) ||
	     addr_not_cache_congruent(paddr, vaddr)) {
		struct folio *folio = page_folio(page);
		int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
		if (dirty) {
			unsigned long offset = offset_in_folio(folio, paddr);
			nr = folio_nr_pages(folio);
			paddr -= offset;
			vaddr -= offset;
			/* wback + inv dcache lines (K-mapping) */
			__flush_dcache_pages(paddr, paddr, nr);

			/* invalidate any existing icache lines (U-mapping) */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_pages(paddr, vaddr, nr);
		}
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * MMUv4 in HS38x cores supports Super Pages which are basis for Linux THP
 * support.
 *
 * Normal and Super pages can co-exist (of course not overlap) in TLB with a
 * new bit "SZ" in TLB page descriptor to distinguish between them.
 * Super Page size is configurable in hardware (4K to 16M), but fixed once
 * RTL builds.
 *
 * The exact THP size a Linux configuration will support is a function of:
 *  - MMU page size (typical 8K, RTL fixed)
 *  - software page walker address split between PGD:PTE:PFN (typical
 *    11:8:13, but can be changed with 1 line)
 * So for above default, THP size supported is 8K * (2^8) = 2M
 *
 * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
 * reduces to 1 level (as PTE is folded into PGD and canonically referred
 * to as PMD).
 * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
 */

void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd)
{
	pte_t pte = __pte(pmd_val(*pmd));
	update_mmu_cache_range(NULL, vma, addr, &pte, HPAGE_PMD_NR);
}

void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			       unsigned long end)
{
	unsigned int cpu;
	unsigned long flags;

	local_irq_save(flags);

	cpu = smp_processor_id();

	if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
		unsigned int asid = hw_pid(vma->vm_mm, cpu);

		/* No need to loop here: this will always be for 1 Huge Page */
		tlb_entry_erase(start | _PAGE_HW_SZ | asid);
	}

	local_irq_restore(flags);
}

#endif

/* Read the MMU Build Configuration Register, decode it and save into
 * the cpuinfo structure for later use.
 * No validation is done here, simply read/convert the BCR
 */
int arc_mmu_mumbojumbo(int c, char *buf, int len)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	unsigned int bcr, u_dtlb, u_itlb, sasid;
	struct bcr_mmu_3 *mmu3;
	struct bcr_mmu_4 *mmu4;
	char super_pg[64] = "";
	int n = 0;

	bcr = read_aux_reg(ARC_REG_MMU_BCR);
	mmu->ver = (bcr >> 24);

	if (is_isa_arcompact() && mmu->ver == 3) {
		mmu3 = (struct bcr_mmu_3 *)&bcr;
		mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
		mmu->sets = 1 << mmu3->sets;
		mmu->ways = 1 << mmu3->ways;
		u_dtlb = mmu3->u_dtlb;
		u_itlb = mmu3->u_itlb;
		sasid = mmu3->sasid;
	} else {
		mmu4 = (struct bcr_mmu_4 *)&bcr;
		mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
		mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
		mmu->sets = 64 << mmu4->n_entry;
		mmu->ways = mmu4->n_ways * 2;
		u_dtlb = mmu4->u_dtlb * 4;
		u_itlb = mmu4->u_itlb * 4;
		sasid = mmu4->sasid;
		mmu->pae = mmu4->pae;
	}

	if (mmu->s_pg_sz_m)
		scnprintf(super_pg, 64, "/%dM%s",
			  mmu->s_pg_sz_m,
			  IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) ? " (THP enabled)":"");

	n += scnprintf(buf + n, len - n,
		      "MMU [v%x]\t: %dk%s, swalk %d lvl, JTLB %dx%d, uDTLB %d, uITLB %d%s%s%s\n",
		       mmu->ver, mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
		       mmu->sets, mmu->ways,
		       u_dtlb, u_itlb,
		       IS_AVAIL1(sasid, ", SASID"),
		       IS_AVAIL2(mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));

	return n;
}

int pae40_exist_but_not_enab(void)
{
	return mmuinfo.pae && !is_pae40_enabled();
}

void arc_mmu_init(void)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	int compat = 0;

	/*
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));

	/*
	 * stack top size sanity check,
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));

	/*
	 * Ensure that MMU features assumed by kernel exist in hardware.
	 *  - For older ARC700 cpus, only v3 supported
	 *  - For HS cpus, v4 was baseline and v5 is backwards compatible
	 *    (will run older software).
	 */
	if (is_isa_arcompact() && mmu->ver == 3)
		compat = 1;
	else if (is_isa_arcv2() && mmu->ver >= 4)
		compat = 1;

	if (!compat)
		panic("MMU ver %d doesn't match kernel built for\n", mmu->ver);

	if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
		panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
		      (unsigned long)TO_MB(HPAGE_PMD_SIZE));

	if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
		panic("Hardware doesn't support PAE40\n");

	/* Enable the MMU with ASID 0 */
	mmu_setup_asid(NULL, 0);

	/* cache the pgd pointer in MMU SCRATCH reg (ARCv2 only) */
	mmu_setup_pgd(NULL, swapper_pg_dir);

	if (pae40_exist_but_not_enab())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);
}

/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first.
 *		---------------------	-----------
 *		|way0|way1|way2|way3|	|way0|way1|
 *		---------------------	-----------
 * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
 * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
 *		~		    ~	~	  ~
 * [set127]	| 508| 509| 510| 511|	| 254| 255|
 *		---------------------	-----------
 * For normal operations we don't (must not) care how the above works since
 * MMU cmd getIndex(vaddr) abstracts that out.
 * However for walking WAYS of a SET, we need to know this
 */
#define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))

/* Handling of Duplicate PD (TLB entry) in MMU.
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 *      time of lookup matching multiple ways.
 * -Ideally these should never happen - but if they do - workaround by deleting
 *      the duplicate one.
 * -Knob to be verbose about it (TODO: hook this up to debugfs)
 */
volatile int dup_pd_silent; /* Be silent about it or complain (default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
			  struct pt_regs *regs)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	unsigned long flags;
	int set, n_ways = mmu->ways;

	n_ways = min(n_ways, 4);
	BUG_ON(mmu->ways > 4);
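	/* the per-set pd0[] scratch array below only has room for 4 ways */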

	local_irq_save(flags);

	/* loop thru all sets of TLB */
	for (set = 0; set < mmu->sets; set++) {

		int is_valid, way;
		unsigned int pd0[4];

		/* read out all the ways of current set */
		for (way = 0, is_valid = 0; way < n_ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
					  SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			is_valid |= pd0[way] & _PAGE_PRESENT;
			pd0[way] &= PAGE_MASK;
		}

		/* If all the WAYS in SET are empty, skip to next SET */
		if (!is_valid)
			continue;

		/* Scan the set for duplicate ways: needs a nested loop */
		for (way = 0; way < n_ways - 1; way++) {

			int n;

			if (!pd0[way])
				continue;

			for (n = way + 1; n < n_ways; n++) {
				if (pd0[way] != pd0[n])
					continue;

				if (!dup_pd_silent)
					pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
						pd0[way], set, way, n);

				/*
				 * clear entry @way and not @n.
				 * This is critical to our optimised loop
				 */
				pd0[way] = 0;
				write_aux_reg(ARC_REG_TLBINDEX,
						SET_WAY_TO_IDX(mmu, set, way));
				__tlb_entry_erase();
			}
		}
	}

	local_irq_restore(flags);
}