xref: /openbmc/linux/arch/arc/mm/tlb.c (revision c5f756d8)
1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2f1f3347dSVineet Gupta /*
3f1f3347dSVineet Gupta  * TLB Management (flush/create/diagnostics) for ARC700
4f1f3347dSVineet Gupta  *
5f1f3347dSVineet Gupta  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
6f1f3347dSVineet Gupta  *
7d79e678dSVineet Gupta  * vineetg: Aug 2011
8d79e678dSVineet Gupta  *  -Reintroduce duplicate PD fixup - some customer chips still have the issue
9d79e678dSVineet Gupta  *
10d79e678dSVineet Gupta  * vineetg: May 2011
11d79e678dSVineet Gupta  *  -No need to flush_cache_page( ) for each call to update_mmu_cache()
12d79e678dSVineet Gupta  *   some of the LMBench tests improved amazingly
13d79e678dSVineet Gupta  *      = page-fault thrice as fast (75 usec to 28 usec)
14d79e678dSVineet Gupta  *      = mmap twice as fast (9.6 msec to 4.6 msec),
15d79e678dSVineet Gupta  *      = fork (5.3 msec to 3.7 msec)
16d79e678dSVineet Gupta  *
17d79e678dSVineet Gupta  * vineetg: April 2011 :
18d79e678dSVineet Gupta  *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
19d79e678dSVineet Gupta  *      helps avoid a shift when preparing PD0 from PTE
20d79e678dSVineet Gupta  *
21d79e678dSVineet Gupta  * vineetg: April 2011 : Preparing for MMU V3
22d79e678dSVineet Gupta  *  -MMU v2/v3 BCRs decoded differently
23d79e678dSVineet Gupta  *  -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
24d79e678dSVineet Gupta  *  -tlb_entry_erase( ) can be void
25d79e678dSVineet Gupta  *  -local_flush_tlb_range( ):
26d79e678dSVineet Gupta  *      = need not "ceil" @end
27d79e678dSVineet Gupta  *      = walks MMU only if range spans < 32 entries, as opposed to 256
28d79e678dSVineet Gupta  *
29d79e678dSVineet Gupta  * Vineetg: Sept 10th 2008
30d79e678dSVineet Gupta  *  -Changes related to MMU v2 (Rel 4.8)
31d79e678dSVineet Gupta  *
32d79e678dSVineet Gupta  * Vineetg: Aug 29th 2008
335f840df5SFlavio Suligoi  *  -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
34d79e678dSVineet Gupta  *    flush Micro-TLBs. If TLB Index Reg is invalid prior to TLBIVUTLB cmd,
35d79e678dSVineet Gupta  *    it fails. Thus need to load it with ANY valid value before invoking
36d79e678dSVineet Gupta  *    TLBIVUTLB cmd
37d79e678dSVineet Gupta  *
38d79e678dSVineet Gupta  * Vineetg: Aug 21st 2008:
39d79e678dSVineet Gupta  *  -Reduced the duration of IRQ lockouts in TLB Flush routines
405f840df5SFlavio Suligoi  *  -Multiple copies of TLB erase code separated into a "single" function
41d79e678dSVineet Gupta  *  -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
42d79e678dSVineet Gupta  *       in interrupt-safe region.
43d79e678dSVineet Gupta  *
44d79e678dSVineet Gupta  * Vineetg: April 23rd Bug #93131
457423cc0cSAdam Buchbinder  *    Problem: tlb_flush_kernel_range() doesn't do anything if the range to
46d79e678dSVineet Gupta  *              flush is more than the size of TLB itself.
47d79e678dSVineet Gupta  *
48d79e678dSVineet Gupta  * Rahul Trivedi : Codito Technologies 2004
49f1f3347dSVineet Gupta  */
50f1f3347dSVineet Gupta 
51f1f3347dSVineet Gupta #include <linux/module.h>
52483e9bcbSVineet Gupta #include <linux/bug.h>
53589ee628SIngo Molnar #include <linux/mm_types.h>
54589ee628SIngo Molnar 
55f1f3347dSVineet Gupta #include <asm/arcregs.h>
56d79e678dSVineet Gupta #include <asm/setup.h>
57f1f3347dSVineet Gupta #include <asm/mmu_context.h>
58da1677b0SVineet Gupta #include <asm/mmu.h>
59f1f3347dSVineet Gupta 
60d79e678dSVineet Gupta /*			Need for ARC MMU v2
61d79e678dSVineet Gupta  *
62d79e678dSVineet Gupta  * ARC700 MMU-v1 had a Joint-TLB for Code and Data and was 2-way set-assoc.
63d79e678dSVineet Gupta  * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
64d79e678dSVineet Gupta  * map into the same set, there would be contention for the 2 ways, causing
65d79e678dSVineet Gupta  * severe thrashing.
66d79e678dSVineet Gupta  *
67d79e678dSVineet Gupta  * Although J-TLB is 2-way set assoc, ARC700 caches J-TLB into uTLBs which have
68d79e678dSVineet Gupta  * much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
695f840df5SFlavio Suligoi  * Given this, the thrashing problem should never happen because once the 3
70d79e678dSVineet Gupta  * J-TLB entries are created (even though the 3rd will knock out one of the
71d79e678dSVineet Gupta  * prev two), the u-D-TLB and u-I-TLB will have what is required for the memcpy.
72d79e678dSVineet Gupta  *
73d79e678dSVineet Gupta  * Yet we still see the thrashing because a J-TLB Write causes a flush of
74d79e678dSVineet Gupta  * u-TLBs. This is a simple design for keeping them in sync. So what do we do?
75d79e678dSVineet Gupta  * The solution James came up with was pretty neat: it utilised the assoc
76d79e678dSVineet Gupta  * of uTLBs by not always invalidating, but only when absolutely necessary.
77d79e678dSVineet Gupta  *
78d79e678dSVineet Gupta  * - Existing TLB commands work as before
79d79e678dSVineet Gupta  * - New command (TLBWriteNI) for TLB write without clearing uTLBs
80d79e678dSVineet Gupta  * - New command (TLBIVUTLB) to invalidate uTLBs.
81d79e678dSVineet Gupta  *
82d79e678dSVineet Gupta  * The uTLBs need only be invalidated when pages are being removed from the
83d79e678dSVineet Gupta  * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
84d79e678dSVineet Gupta  * as a result of a miss, the removed entry is still allowed to exist in the
85d79e678dSVineet Gupta  * uTLBs as it is still valid and present in the OS page table. This allows the
86d79e678dSVineet Gupta  * full associativity of the uTLBs to hide the limited associativity of the main
87d79e678dSVineet Gupta  * TLB.
88d79e678dSVineet Gupta  *
89d79e678dSVineet Gupta  * During a miss handler, the new "TLBWriteNI" command is used to load
90d79e678dSVineet Gupta  * entries without clearing the uTLBs.
91d79e678dSVineet Gupta  *
92d79e678dSVineet Gupta  * When the OS page table is updated, TLB entries that may be associated with a
93d79e678dSVineet Gupta  * removed page are removed (flushed) from the TLB using TLBWrite. In this
94d79e678dSVineet Gupta  * circumstance, the uTLBs must also be cleared. This is done by using the
95d79e678dSVineet Gupta  * existing TLBWrite command. An explicit IVUTLB is also required for those
96d79e678dSVineet Gupta  * corner cases when TLBWrite was not executed at all because the corresponding
97d79e678dSVineet Gupta  * J-TLB entry got evicted/replaced.
98d79e678dSVineet Gupta  */
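/*
 * Editorial sketch of the resulting policy (illustrative only - the real
 * sequences live in the helpers below and in the fast-path refill handler
 * written in assembly elsewhere). Only registers/commands already used in
 * this file appear:
 *
 *	// TLB refill on a miss: translation still valid in the OS page
 *	// table, so stale uTLB copies are harmless -> non-invalidating write
 *	write_aux_reg(ARC_REG_TLBPD0, pd0);
 *	write_aux_reg(ARC_REG_TLBPD1, pd1);
 *	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
 *
 *	// Page removed from the OS page table: uTLB copies must go too
 *	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);	// clears uTLBs as well
 *
 *	// Corner case: J-TLB entry already evicted, so no TLBWrite happened
 *	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);	// explicit uTLB inval
 */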
99d79e678dSVineet Gupta 
100da1677b0SVineet Gupta 
101f1f3347dSVineet Gupta /* A copy of the ASID from the PID reg is kept in asid_cache */
10263eca94cSVineet Gupta DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
103cc562d2eSVineet Gupta 
104b5ddb6d5SVineet Gupta static int __read_mostly pae_exists;
105b5ddb6d5SVineet Gupta 
106d79e678dSVineet Gupta /*
107d79e678dSVineet Gupta  * Utility Routine to erase a J-TLB entry
108483e9bcbSVineet Gupta  * Caller needs to setup Index Reg (manually or via getIndex)
109d79e678dSVineet Gupta  */
110483e9bcbSVineet Gupta static inline void __tlb_entry_erase(void)
111d79e678dSVineet Gupta {
112d79e678dSVineet Gupta 	write_aux_reg(ARC_REG_TLBPD1, 0);
1135a364c2aSVineet Gupta 
1145a364c2aSVineet Gupta 	if (is_pae40_enabled())
1155a364c2aSVineet Gupta 		write_aux_reg(ARC_REG_TLBPD1HI, 0);
1165a364c2aSVineet Gupta 
117d79e678dSVineet Gupta 	write_aux_reg(ARC_REG_TLBPD0, 0);
118d79e678dSVineet Gupta 	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
119d79e678dSVineet Gupta }
120d79e678dSVineet Gupta 
1211355ea2eSVineet Gupta static void utlb_invalidate(void)
1221355ea2eSVineet Gupta {
1231355ea2eSVineet Gupta #if (CONFIG_ARC_MMU_VER >= 2)
1241355ea2eSVineet Gupta 
1251355ea2eSVineet Gupta #if (CONFIG_ARC_MMU_VER == 2)
1261355ea2eSVineet Gupta 	/* MMU v2 introduced the uTLB Flush command.
1271355ea2eSVineet Gupta 	 * There was however an obscure hardware bug, where uTLB flush would
1281355ea2eSVineet Gupta 	 * fail when a prior probe for J-TLB (both totally unrelated) would
1291355ea2eSVineet Gupta 	 * return a lookup error - because the entry didn't exist in the MMU.
1305f840df5SFlavio Suligoi 	 * The workaround was to set the Index reg to some valid value prior to
1311355ea2eSVineet Gupta 	 * the flush. This was fixed in MMU v3.
1321355ea2eSVineet Gupta 	 */
1331355ea2eSVineet Gupta 	unsigned int idx;
1341355ea2eSVineet Gupta 
1351355ea2eSVineet Gupta 	/* make sure INDEX Reg is valid */
1361355ea2eSVineet Gupta 	idx = read_aux_reg(ARC_REG_TLBINDEX);
1371355ea2eSVineet Gupta 
1381355ea2eSVineet Gupta 	/* If not, write some dummy value */
1391355ea2eSVineet Gupta 	if (unlikely(idx & TLB_LKUP_ERR))
1401355ea2eSVineet Gupta 		write_aux_reg(ARC_REG_TLBINDEX, 0xa);
1411355ea2eSVineet Gupta #endif
1421355ea2eSVineet Gupta 
1431355ea2eSVineet Gupta 	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
1441355ea2eSVineet Gupta #endif
1451355ea2eSVineet Gupta 
1461355ea2eSVineet Gupta }
1471355ea2eSVineet Gupta 
148d7a512bfSVineet Gupta #if (CONFIG_ARC_MMU_VER < 4)
149d7a512bfSVineet Gupta 
150483e9bcbSVineet Gupta static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
151483e9bcbSVineet Gupta {
152483e9bcbSVineet Gupta 	unsigned int idx;
153483e9bcbSVineet Gupta 
154483e9bcbSVineet Gupta 	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
155483e9bcbSVineet Gupta 
156483e9bcbSVineet Gupta 	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
157483e9bcbSVineet Gupta 	idx = read_aux_reg(ARC_REG_TLBINDEX);
158483e9bcbSVineet Gupta 
159483e9bcbSVineet Gupta 	return idx;
160483e9bcbSVineet Gupta }
161483e9bcbSVineet Gupta 
162d79e678dSVineet Gupta static void tlb_entry_erase(unsigned int vaddr_n_asid)
163d79e678dSVineet Gupta {
164d79e678dSVineet Gupta 	unsigned int idx;
165d79e678dSVineet Gupta 
166d79e678dSVineet Gupta 	/* Locate the TLB entry for this vaddr + ASID */
167483e9bcbSVineet Gupta 	idx = tlb_entry_lkup(vaddr_n_asid);
168d79e678dSVineet Gupta 
169d79e678dSVineet Gupta 	/* No error means entry found, zero it out */
170d79e678dSVineet Gupta 	if (likely(!(idx & TLB_LKUP_ERR))) {
171d79e678dSVineet Gupta 		__tlb_entry_erase();
172483e9bcbSVineet Gupta 	} else {
173d79e678dSVineet Gupta 		/* Duplicate entry error */
174483e9bcbSVineet Gupta 		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
175d79e678dSVineet Gupta 					   vaddr_n_asid);
176d79e678dSVineet Gupta 	}
177d79e678dSVineet Gupta }
178d79e678dSVineet Gupta 
1795a364c2aSVineet Gupta static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
180483e9bcbSVineet Gupta {
181483e9bcbSVineet Gupta 	unsigned int idx;
182483e9bcbSVineet Gupta 
183483e9bcbSVineet Gupta 	/*
184483e9bcbSVineet Gupta 	 * First verify if entry for this vaddr+ASID already exists
185483e9bcbSVineet Gupta 	 * This also sets up PD0 (vaddr, ASID..) for final commit
186483e9bcbSVineet Gupta 	 */
187483e9bcbSVineet Gupta 	idx = tlb_entry_lkup(pd0);
188483e9bcbSVineet Gupta 
189483e9bcbSVineet Gupta 	/*
190483e9bcbSVineet Gupta 	 * If not already present, get a free slot from the MMU.
191483e9bcbSVineet Gupta 	 * Otherwise, Probe would have located the entry and set INDEX Reg
192483e9bcbSVineet Gupta 	 * to the existing location. This will cause the Write CMD to
193483e9bcbSVineet Gupta 	 * over-write the existing entry with the new PD0 and PD1
194483e9bcbSVineet Gupta 	 */
195483e9bcbSVineet Gupta 	if (likely(idx & TLB_LKUP_ERR))
196483e9bcbSVineet Gupta 		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
197483e9bcbSVineet Gupta 
198483e9bcbSVineet Gupta 	/* setup the other half of TLB entry (pfn, rwx..) */
199483e9bcbSVineet Gupta 	write_aux_reg(ARC_REG_TLBPD1, pd1);
200483e9bcbSVineet Gupta 
201483e9bcbSVineet Gupta 	/*
202483e9bcbSVineet Gupta 	 * Commit the Entry to MMU
2037423cc0cSAdam Buchbinder 	 * It doesn't sound safe to use the TLBWriteNI cmd here
204483e9bcbSVineet Gupta 	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
205483e9bcbSVineet Gupta 	 */
206483e9bcbSVineet Gupta 	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
207483e9bcbSVineet Gupta }
208483e9bcbSVineet Gupta 
209d7a512bfSVineet Gupta #else	/* CONFIG_ARC_MMU_VER >= 4) */
210d7a512bfSVineet Gupta 
211d7a512bfSVineet Gupta static void tlb_entry_erase(unsigned int vaddr_n_asid)
212d7a512bfSVineet Gupta {
213d7a512bfSVineet Gupta 	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
214d7a512bfSVineet Gupta 	write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
215d7a512bfSVineet Gupta }
216d7a512bfSVineet Gupta 
2175a364c2aSVineet Gupta static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
218d7a512bfSVineet Gupta {
219d7a512bfSVineet Gupta 	write_aux_reg(ARC_REG_TLBPD0, pd0);
220d7a512bfSVineet Gupta 	write_aux_reg(ARC_REG_TLBPD1, pd1);
2215a364c2aSVineet Gupta 
2225a364c2aSVineet Gupta 	if (is_pae40_enabled())
2235a364c2aSVineet Gupta 		write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);
2245a364c2aSVineet Gupta 
225d7a512bfSVineet Gupta 	write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
226d7a512bfSVineet Gupta }
227d7a512bfSVineet Gupta 
228d7a512bfSVineet Gupta #endif
229d7a512bfSVineet Gupta 
230d79e678dSVineet Gupta /*
231d79e678dSVineet Gupta  * Unconditionally (without lookup) erase the entire MMU contents
232d79e678dSVineet Gupta  */
233d79e678dSVineet Gupta 
234d79e678dSVineet Gupta noinline void local_flush_tlb_all(void)
235d79e678dSVineet Gupta {
236b598e17fSVineet Gupta 	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
237d79e678dSVineet Gupta 	unsigned long flags;
238d79e678dSVineet Gupta 	unsigned int entry;
239b598e17fSVineet Gupta 	int num_tlb = mmu->sets * mmu->ways;
240d79e678dSVineet Gupta 
241d79e678dSVineet Gupta 	local_irq_save(flags);
242d79e678dSVineet Gupta 
243d79e678dSVineet Gupta 	/* Load PD0 and PD1 with template for a Blank Entry */
244d79e678dSVineet Gupta 	write_aux_reg(ARC_REG_TLBPD1, 0);
2455a364c2aSVineet Gupta 
2465a364c2aSVineet Gupta 	if (is_pae40_enabled())
2475a364c2aSVineet Gupta 		write_aux_reg(ARC_REG_TLBPD1HI, 0);
2485a364c2aSVineet Gupta 
249d79e678dSVineet Gupta 	write_aux_reg(ARC_REG_TLBPD0, 0);
250d79e678dSVineet Gupta 
251b598e17fSVineet Gupta 	for (entry = 0; entry < num_tlb; entry++) {
252d79e678dSVineet Gupta 		/* write this entry to the TLB */
253d79e678dSVineet Gupta 		write_aux_reg(ARC_REG_TLBINDEX, entry);
2541355ea2eSVineet Gupta 		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
255d79e678dSVineet Gupta 	}
256d79e678dSVineet Gupta 
257fe6c1b86SVineet Gupta 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
258fe6c1b86SVineet Gupta 		const int stlb_idx = 0x800;
259fe6c1b86SVineet Gupta 
260fe6c1b86SVineet Gupta 		/* Blank sTLB entry */
261fe6c1b86SVineet Gupta 		write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);
262fe6c1b86SVineet Gupta 
263fe6c1b86SVineet Gupta 		for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
264fe6c1b86SVineet Gupta 			write_aux_reg(ARC_REG_TLBINDEX, entry);
2651355ea2eSVineet Gupta 			write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
266fe6c1b86SVineet Gupta 		}
267fe6c1b86SVineet Gupta 	}
268fe6c1b86SVineet Gupta 
269d79e678dSVineet Gupta 	utlb_invalidate();
270d79e678dSVineet Gupta 
271d79e678dSVineet Gupta 	local_irq_restore(flags);
272d79e678dSVineet Gupta }
273d79e678dSVineet Gupta 
274d79e678dSVineet Gupta /*
2755f840df5SFlavio Suligoi  * Flush the entire MM for userland. The fastest way is to move to Next ASID
276d79e678dSVineet Gupta  */
277d79e678dSVineet Gupta noinline void local_flush_tlb_mm(struct mm_struct *mm)
278d79e678dSVineet Gupta {
279d79e678dSVineet Gupta 	/*
280d79e678dSVineet Gupta 	 * Small optimisation courtesy of IA64:
281d79e678dSVineet Gupta 	 * flush_mm is called during fork, exit, munmap etc., multiple times as well.
282d79e678dSVineet Gupta 	 * Only for fork( ) do we need to move the parent to a new MMU ctxt;
283d79e678dSVineet Gupta 	 * all other cases are NOPs, hence this check.
284d79e678dSVineet Gupta 	 */
285d79e678dSVineet Gupta 	if (atomic_read(&mm->mm_users) == 0)
286d79e678dSVineet Gupta 		return;
287d79e678dSVineet Gupta 
288d79e678dSVineet Gupta 	/*
2893daa48d1SVineet Gupta 	 * - Move to a new ASID, but only if the mm is still wired in
2903daa48d1SVineet Gupta 	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
2913daa48d1SVineet Gupta 	 *    causing h/w - s/w ASID to get out of sync)
2923daa48d1SVineet Gupta 	 * - Also the new get_new_mmu_context() implementation allocates a new
2933daa48d1SVineet Gupta 	 *   ASID only if one is not already allocated - so deallocate first
294d79e678dSVineet Gupta 	 */
295d79e678dSVineet Gupta 	destroy_context(mm);
2963daa48d1SVineet Gupta 	if (current->mm == mm)
297d79e678dSVineet Gupta 		get_new_mmu_context(mm);
298d79e678dSVineet Gupta }
299d79e678dSVineet Gupta 
300d79e678dSVineet Gupta /*
301d79e678dSVineet Gupta  * Flush a Range of TLB entries for userland.
302d79e678dSVineet Gupta  * @start is inclusive, while @end is exclusive
303d79e678dSVineet Gupta  * Difference between this and Kernel Range Flush is
304d79e678dSVineet Gupta  *  -Here the fastest way (if range is too large) is to move to next ASID
305d79e678dSVineet Gupta  *      without doing any explicit Shootdown
3065f840df5SFlavio Suligoi  *  -In case of kernel Flush, entry has to be shot down explicitly
307d79e678dSVineet Gupta  */
308d79e678dSVineet Gupta void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
309d79e678dSVineet Gupta 			   unsigned long end)
310d79e678dSVineet Gupta {
31163eca94cSVineet Gupta 	const unsigned int cpu = smp_processor_id();
312d79e678dSVineet Gupta 	unsigned long flags;
313d79e678dSVineet Gupta 
314d79e678dSVineet Gupta 	/* If range @start to @end is more than 32 TLB entries deep,
315d79e678dSVineet Gupta 	 * it's better to move to a new ASID rather than searching for
316d79e678dSVineet Gupta 	 * individual entries and then shooting them down
317d79e678dSVineet Gupta 	 *
318d79e678dSVineet Gupta 	 * The calc above is rough and doesn't account for unaligned parts,
319d79e678dSVineet Gupta 	 * since this is heuristic-based anyway
320d79e678dSVineet Gupta 	 */
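	/*
	 * Editorial worked example: with the typical 8K ARC page size the
	 * cutoff is 32 * 8K = 256KB - a 1MB range takes the new-ASID
	 * short-cut just below, while a 64KB one walks and erases its 8
	 * entries in the loop further down.
	 */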
321d79e678dSVineet Gupta 	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
322d79e678dSVineet Gupta 		local_flush_tlb_mm(vma->vm_mm);
323d79e678dSVineet Gupta 		return;
324d79e678dSVineet Gupta 	}
325d79e678dSVineet Gupta 
326d79e678dSVineet Gupta 	/*
327d79e678dSVineet Gupta 	 * @start moved to page start: this alone suffices for checking
328d79e678dSVineet Gupta 	 * loop end condition below, w/o need for "ceil"-aligning @end
329d79e678dSVineet Gupta 	 * e.g. 2000 to 4001 will anyhow loop twice
330d79e678dSVineet Gupta 	 */
331d79e678dSVineet Gupta 	start &= PAGE_MASK;
332d79e678dSVineet Gupta 
333d79e678dSVineet Gupta 	local_irq_save(flags);
334d79e678dSVineet Gupta 
33563eca94cSVineet Gupta 	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
336d79e678dSVineet Gupta 		while (start < end) {
33763eca94cSVineet Gupta 			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
338d79e678dSVineet Gupta 			start += PAGE_SIZE;
339d79e678dSVineet Gupta 		}
340d79e678dSVineet Gupta 	}
341d79e678dSVineet Gupta 
342d79e678dSVineet Gupta 	local_irq_restore(flags);
343d79e678dSVineet Gupta }
344d79e678dSVineet Gupta 
345d79e678dSVineet Gupta /* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
346d79e678dSVineet Gupta  *  @start, @end interpreted as kvaddr
347d79e678dSVineet Gupta  * Interestingly, shared TLB entries can also be flushed using just
348d79e678dSVineet Gupta  * @start,@end alone (interpreted as user vaddr), although technically SASID
349d79e678dSVineet Gupta  * is also needed. However our smart TLbProbe lookup takes care of that.
350d79e678dSVineet Gupta  * is also needed. However our smart TLBProbe lookup takes care of that.
351d79e678dSVineet Gupta void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
352d79e678dSVineet Gupta {
353d79e678dSVineet Gupta 	unsigned long flags;
354d79e678dSVineet Gupta 
355d79e678dSVineet Gupta 	/* exactly same as above, except for TLB entry not taking ASID */
356d79e678dSVineet Gupta 
357d79e678dSVineet Gupta 	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
358d79e678dSVineet Gupta 		local_flush_tlb_all();
359d79e678dSVineet Gupta 		return;
360d79e678dSVineet Gupta 	}
361d79e678dSVineet Gupta 
362d79e678dSVineet Gupta 	start &= PAGE_MASK;
363d79e678dSVineet Gupta 
364d79e678dSVineet Gupta 	local_irq_save(flags);
365d79e678dSVineet Gupta 	while (start < end) {
366d79e678dSVineet Gupta 		tlb_entry_erase(start);
367d79e678dSVineet Gupta 		start += PAGE_SIZE;
368d79e678dSVineet Gupta 	}
369d79e678dSVineet Gupta 
370d79e678dSVineet Gupta 	local_irq_restore(flags);
371d79e678dSVineet Gupta }
372d79e678dSVineet Gupta 
373d79e678dSVineet Gupta /*
374d79e678dSVineet Gupta  * Delete TLB entry in MMU for a given page (user virtual address)
375d79e678dSVineet Gupta  * NOTE: One TLB entry contains translation for a single PAGE
376d79e678dSVineet Gupta  */
377d79e678dSVineet Gupta 
378d79e678dSVineet Gupta void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
379d79e678dSVineet Gupta {
38063eca94cSVineet Gupta 	const unsigned int cpu = smp_processor_id();
381d79e678dSVineet Gupta 	unsigned long flags;
382d79e678dSVineet Gupta 
383d79e678dSVineet Gupta 	/* Note that it is critical that interrupts are DISABLED between
384d79e678dSVineet Gupta 	 * checking the ASID and using it to flush the TLB entry
385d79e678dSVineet Gupta 	 */
386d79e678dSVineet Gupta 	local_irq_save(flags);
387d79e678dSVineet Gupta 
38863eca94cSVineet Gupta 	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
38963eca94cSVineet Gupta 		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
390d79e678dSVineet Gupta 	}
391d79e678dSVineet Gupta 
392d79e678dSVineet Gupta 	local_irq_restore(flags);
393d79e678dSVineet Gupta }
394cc562d2eSVineet Gupta 
3955ea72a90SVineet Gupta #ifdef CONFIG_SMP
3965ea72a90SVineet Gupta 
3975ea72a90SVineet Gupta struct tlb_args {
3985ea72a90SVineet Gupta 	struct vm_area_struct *ta_vma;
3995ea72a90SVineet Gupta 	unsigned long ta_start;
4005ea72a90SVineet Gupta 	unsigned long ta_end;
4015ea72a90SVineet Gupta };
4025ea72a90SVineet Gupta 
4035ea72a90SVineet Gupta static inline void ipi_flush_tlb_page(void *arg)
4045ea72a90SVineet Gupta {
4055ea72a90SVineet Gupta 	struct tlb_args *ta = arg;
4065ea72a90SVineet Gupta 
4075ea72a90SVineet Gupta 	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
4085ea72a90SVineet Gupta }
4095ea72a90SVineet Gupta 
4105ea72a90SVineet Gupta static inline void ipi_flush_tlb_range(void *arg)
4115ea72a90SVineet Gupta {
4125ea72a90SVineet Gupta 	struct tlb_args *ta = arg;
4135ea72a90SVineet Gupta 
4145ea72a90SVineet Gupta 	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
4155ea72a90SVineet Gupta }
4165ea72a90SVineet Gupta 
417c7119d56SVineet Gupta #ifdef CONFIG_TRANSPARENT_HUGEPAGE
418c7119d56SVineet Gupta static inline void ipi_flush_pmd_tlb_range(void *arg)
419c7119d56SVineet Gupta {
420c7119d56SVineet Gupta 	struct tlb_args *ta = arg;
421c7119d56SVineet Gupta 
422c7119d56SVineet Gupta 	local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
423c7119d56SVineet Gupta }
424c7119d56SVineet Gupta #endif
425c7119d56SVineet Gupta 
4265ea72a90SVineet Gupta static inline void ipi_flush_tlb_kernel_range(void *arg)
4275ea72a90SVineet Gupta {
4285ea72a90SVineet Gupta 	struct tlb_args *ta = (struct tlb_args *)arg;
4295ea72a90SVineet Gupta 
4305ea72a90SVineet Gupta 	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
4315ea72a90SVineet Gupta }
4325ea72a90SVineet Gupta 
4335ea72a90SVineet Gupta void flush_tlb_all(void)
4345ea72a90SVineet Gupta {
4355ea72a90SVineet Gupta 	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
4365ea72a90SVineet Gupta }
4375ea72a90SVineet Gupta 
4385ea72a90SVineet Gupta void flush_tlb_mm(struct mm_struct *mm)
4395ea72a90SVineet Gupta {
4405ea72a90SVineet Gupta 	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
4415ea72a90SVineet Gupta 			 mm, 1);
4425ea72a90SVineet Gupta }
4435ea72a90SVineet Gupta 
4445ea72a90SVineet Gupta void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
4455ea72a90SVineet Gupta {
4465ea72a90SVineet Gupta 	struct tlb_args ta = {
4475ea72a90SVineet Gupta 		.ta_vma = vma,
4485ea72a90SVineet Gupta 		.ta_start = uaddr
4495ea72a90SVineet Gupta 	};
4505ea72a90SVineet Gupta 
4515ea72a90SVineet Gupta 	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
4525ea72a90SVineet Gupta }
4535ea72a90SVineet Gupta 
4545ea72a90SVineet Gupta void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
4555ea72a90SVineet Gupta 		     unsigned long end)
4565ea72a90SVineet Gupta {
4575ea72a90SVineet Gupta 	struct tlb_args ta = {
4585ea72a90SVineet Gupta 		.ta_vma = vma,
4595ea72a90SVineet Gupta 		.ta_start = start,
4605ea72a90SVineet Gupta 		.ta_end = end
4615ea72a90SVineet Gupta 	};
4625ea72a90SVineet Gupta 
4635ea72a90SVineet Gupta 	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
4645ea72a90SVineet Gupta }
4655ea72a90SVineet Gupta 
466c7119d56SVineet Gupta #ifdef CONFIG_TRANSPARENT_HUGEPAGE
467c7119d56SVineet Gupta void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
468c7119d56SVineet Gupta 			 unsigned long end)
469c7119d56SVineet Gupta {
470c7119d56SVineet Gupta 	struct tlb_args ta = {
471c7119d56SVineet Gupta 		.ta_vma = vma,
472c7119d56SVineet Gupta 		.ta_start = start,
473c7119d56SVineet Gupta 		.ta_end = end
474c7119d56SVineet Gupta 	};
475c7119d56SVineet Gupta 
476c7119d56SVineet Gupta 	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
477c7119d56SVineet Gupta }
478c7119d56SVineet Gupta #endif
479c7119d56SVineet Gupta 
4805ea72a90SVineet Gupta void flush_tlb_kernel_range(unsigned long start, unsigned long end)
4815ea72a90SVineet Gupta {
4825ea72a90SVineet Gupta 	struct tlb_args ta = {
4835ea72a90SVineet Gupta 		.ta_start = start,
4845ea72a90SVineet Gupta 		.ta_end = end
4855ea72a90SVineet Gupta 	};
4865ea72a90SVineet Gupta 
4875ea72a90SVineet Gupta 	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
4885ea72a90SVineet Gupta }
4895ea72a90SVineet Gupta #endif
4905ea72a90SVineet Gupta 
491cc562d2eSVineet Gupta /*
492cc562d2eSVineet Gupta  * Routine to create a TLB entry
493cc562d2eSVineet Gupta  */
49428b4af72SVineet Gupta void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
495cc562d2eSVineet Gupta {
496cc562d2eSVineet Gupta 	unsigned long flags;
497483e9bcbSVineet Gupta 	unsigned int asid_or_sasid, rwx;
4985a364c2aSVineet Gupta 	unsigned long pd0;
4995a364c2aSVineet Gupta 	pte_t pd1;
500cc562d2eSVineet Gupta 
501cc562d2eSVineet Gupta 	/*
502cc562d2eSVineet Gupta 	 * -the ASID for the TLB entry is fetched from the MMU ASID reg (valid for curr)
503cc562d2eSVineet Gupta 	 * -it ASID for TLB entry is fetched from MMU ASID reg (valid for curr)
504cc562d2eSVineet Gupta 	 * -completes the lazy write to SASID reg (again valid for curr tsk)
505cc562d2eSVineet Gupta 	 *
506cc562d2eSVineet Gupta 	 * Removing the assumption involves
507cc562d2eSVineet Gupta 	 * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
508cc562d2eSVineet Gupta 	 * -Fix the TLB paranoid debug code to not trigger false negatives.
509cc562d2eSVineet Gupta 	 * -More importantly it makes this handler inconsistent with fast-path
510cc562d2eSVineet Gupta 	 *  TLB Refill handler which always deals with "current"
511cc562d2eSVineet Gupta 	 *
512cc562d2eSVineet Gupta 	 * Let's see the use cases when current->mm != vma->mm and we land here
513cc562d2eSVineet Gupta 	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
514cc562d2eSVineet Gupta 	 *     Here VM wants to pre-install a TLB entry for user stack while
515cc562d2eSVineet Gupta 	 *     current->mm still points to pre-execve mm (hence the condition).
516cc562d2eSVineet Gupta 	 *     However the stack vaddr is soon relocated (randomization) and
517cc562d2eSVineet Gupta 	 *     move_page_tables() tries to undo that TLB entry.
518cc562d2eSVineet Gupta 	 *     Thus not creating TLB entry is not any worse.
519cc562d2eSVineet Gupta 	 *
520cc562d2eSVineet Gupta 	 *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
521cc562d2eSVineet Gupta 	 *     breakpoint in debugged task. Not creating a TLB now is not
522cc562d2eSVineet Gupta 	 *     performance critical.
523cc562d2eSVineet Gupta 	 *
524cc562d2eSVineet Gupta 	 * Neither of the cases above justifies the code churn.
525cc562d2eSVineet Gupta 	 */
526cc562d2eSVineet Gupta 	if (current->active_mm != vma->vm_mm)
527cc562d2eSVineet Gupta 		return;
528cc562d2eSVineet Gupta 
529cc562d2eSVineet Gupta 	local_irq_save(flags);
530cc562d2eSVineet Gupta 
53128b4af72SVineet Gupta 	tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr);
532cc562d2eSVineet Gupta 
53328b4af72SVineet Gupta 	vaddr &= PAGE_MASK;
534cc562d2eSVineet Gupta 
535cc562d2eSVineet Gupta 	/* update this PTE credentials */
536cc562d2eSVineet Gupta 	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
537cc562d2eSVineet Gupta 
538d091fcb9SVineet Gupta 	/* Create HW TLB(PD0,PD1) from PTE  */
539cc562d2eSVineet Gupta 
540cc562d2eSVineet Gupta 	/* ASID for this task */
541cc562d2eSVineet Gupta 	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
542cc562d2eSVineet Gupta 
54328b4af72SVineet Gupta 	pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
544cc562d2eSVineet Gupta 
54564b703efSVineet Gupta 	/*
54664b703efSVineet Gupta 	 * ARC MMU provides fully orthogonal access bits for K/U mode,
54764b703efSVineet Gupta 	 * however Linux only saves 1 set to save PTE real-estate
54864b703efSVineet Gupta 	 * Here we convert 3 PTE bits into 6 MMU bits:
54964b703efSVineet Gupta 	 * -Kernel only entries have Kr Kw Kx 0 0 0
55064b703efSVineet Gupta 	 * -User entries have mirrored K and U bits
55164b703efSVineet Gupta 	 */
55264b703efSVineet Gupta 	rwx = pte_val(*ptep) & PTE_BITS_RWX;
55364b703efSVineet Gupta 
55464b703efSVineet Gupta 	if (pte_val(*ptep) & _PAGE_GLOBAL)
55564b703efSVineet Gupta 		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
55664b703efSVineet Gupta 	else
55764b703efSVineet Gupta 		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */
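	/*
	 * Editorial worked example, following the comment's r w x notation:
	 * a user R+W data page has r=1 w=1 x=0, i.e. rwx = 0b110, which
	 * mirrors to Kr Kw 0 Ur Uw 0 = 0b110110; the same bits on a
	 * _PAGE_GLOBAL (kernel) page instead shift to Kr Kw 0 0 0 0 = 0b110000.
	 */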
55864b703efSVineet Gupta 
559483e9bcbSVineet Gupta 	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);
560cc562d2eSVineet Gupta 
561483e9bcbSVineet Gupta 	tlb_entry_insert(pd0, pd1);
562cc562d2eSVineet Gupta 
563cc562d2eSVineet Gupta 	local_irq_restore(flags);
564cc562d2eSVineet Gupta }
565cc562d2eSVineet Gupta 
566eacd0e95SVineet Gupta /*
567eacd0e95SVineet Gupta  * Called at the end of pagefault, for a userspace mapped page
568eacd0e95SVineet Gupta  *  -pre-install the corresponding TLB entry into MMU
5694102b533SVineet Gupta  *  -Finalize the delayed D-cache flush of kernel mapping of page due to
5704102b533SVineet Gupta  *  	flush_dcache_page(), copy_user_page()
5714102b533SVineet Gupta  *
5724102b533SVineet Gupta  * Note that flush (when done) involves both WBACK - so physical page is
5734102b533SVineet Gupta  * in sync as well as INV - so any non-congruent aliases don't remain
574cc562d2eSVineet Gupta  */
57524603fddSVineet Gupta void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
576cc562d2eSVineet Gupta 		      pte_t *ptep)
577cc562d2eSVineet Gupta {
57824603fddSVineet Gupta 	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
579*c5f756d8SVladimir Isaev 	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
58029b93c68SVineet Gupta 	struct page *page = pfn_to_page(pte_pfn(*ptep));
581cc562d2eSVineet Gupta 
58224603fddSVineet Gupta 	create_tlb(vma, vaddr, ptep);
58324603fddSVineet Gupta 
58429b93c68SVineet Gupta 	if (page == ZERO_PAGE(0)) {
58529b93c68SVineet Gupta 		return;
58629b93c68SVineet Gupta 	}
58729b93c68SVineet Gupta 
5884102b533SVineet Gupta 	/*
5894102b533SVineet Gupta 	 * Exec page : Independent of aliasing/page-color considerations,
5904102b533SVineet Gupta 	 *	       since icache doesn't snoop dcache on ARC, any dirty
5914102b533SVineet Gupta 	 *	       K-mapping of a code page needs to be wback+inv so that
5924102b533SVineet Gupta 	 *	       icache fetch by userspace sees code correctly.
5934102b533SVineet Gupta 	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
5944102b533SVineet Gupta 	 *	       so userspace sees the right data.
5954102b533SVineet Gupta 	 *  (Avoids the flush for Non-exec + congruent mapping case)
5964102b533SVineet Gupta 	 */
5973e87974dSVineet Gupta 	if ((vma->vm_flags & VM_EXEC) ||
5983e87974dSVineet Gupta 	     addr_not_cache_congruent(paddr, vaddr)) {
599eacd0e95SVineet Gupta 
6002ed21daeSVineet Gupta 		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
601eacd0e95SVineet Gupta 		if (dirty) {
60261a16348SVineet Gupta 			/* wback + inv dcache lines (K-mapping) */
6036ec18a81SVineet Gupta 			__flush_dcache_page(paddr, paddr);
6044102b533SVineet Gupta 
60561a16348SVineet Gupta 			/* invalidate any existing icache lines (U-mapping) */
6064102b533SVineet Gupta 			if (vma->vm_flags & VM_EXEC)
60724603fddSVineet Gupta 				__inv_icache_page(paddr, vaddr);
60824603fddSVineet Gupta 		}
609cc562d2eSVineet Gupta 	}
610eacd0e95SVineet Gupta }
611cc562d2eSVineet Gupta 
612fe6c1b86SVineet Gupta #ifdef CONFIG_TRANSPARENT_HUGEPAGE
613fe6c1b86SVineet Gupta 
614fe6c1b86SVineet Gupta /*
615fe6c1b86SVineet Gupta  * MMUv4 in HS38x cores supports Super Pages which are basis for Linux THP
616fe6c1b86SVineet Gupta  * support.
617fe6c1b86SVineet Gupta  *
618fe6c1b86SVineet Gupta  * Normal and Super pages can co-exist (of course not overlap) in TLB with a
6197423cc0cSAdam Buchbinder  * new bit "SZ" in TLB page descriptor to distinguish between them.
620fe6c1b86SVineet Gupta  * Super Page size is configurable in hardware (4K to 16M), but fixed once
621fe6c1b86SVineet Gupta  * the RTL is built.
622fe6c1b86SVineet Gupta  *
6235f840df5SFlavio Suligoi  * The exact THP size a Linux configuration will support is a function of:
624fe6c1b86SVineet Gupta  *  - MMU page size (typical 8K, RTL fixed)
625fe6c1b86SVineet Gupta  *  - software page walker address split between PGD:PTE:PFN (typical
626fe6c1b86SVineet Gupta  *    11:8:13, but can be changed with 1 line)
627fe6c1b86SVineet Gupta  * So for above default, THP size supported is 8K * (2^8) = 2M
628fe6c1b86SVineet Gupta  *
629fe6c1b86SVineet Gupta  * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
630fe6c1b86SVineet Gupta  * reduces to 1 level (as PTE is folded into PGD and canonically referred
631fe6c1b86SVineet Gupta  * to as PMD).
632fe6c1b86SVineet Gupta  * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
633fe6c1b86SVineet Gupta  */
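/*
 * Editorial worked numbers for the default 11:8:13 split above: a PTE maps
 * 2^13 = 8K, the folded PMD level covers 2^8 PTEs, so THP size is
 * 2^8 * 8K = 2M; widening the PTE field by one bit would double that to 4M.
 */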
634fe6c1b86SVineet Gupta 
635fe6c1b86SVineet Gupta void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
636fe6c1b86SVineet Gupta 				 pmd_t *pmd)
637fe6c1b86SVineet Gupta {
638fe6c1b86SVineet Gupta 	pte_t pte = __pte(pmd_val(*pmd));
639fe6c1b86SVineet Gupta 	update_mmu_cache(vma, addr, &pte);
640fe6c1b86SVineet Gupta }
641fe6c1b86SVineet Gupta 
642fe6c1b86SVineet Gupta void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
643fe6c1b86SVineet Gupta 				pgtable_t pgtable)
644fe6c1b86SVineet Gupta {
645fe6c1b86SVineet Gupta 	struct list_head *lh = (struct list_head *) pgtable;
646fe6c1b86SVineet Gupta 
647fe6c1b86SVineet Gupta 	assert_spin_locked(&mm->page_table_lock);
648fe6c1b86SVineet Gupta 
649fe6c1b86SVineet Gupta 	/* FIFO */
650fe6c1b86SVineet Gupta 	if (!pmd_huge_pte(mm, pmdp))
651fe6c1b86SVineet Gupta 		INIT_LIST_HEAD(lh);
652fe6c1b86SVineet Gupta 	else
653fe6c1b86SVineet Gupta 		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
654fe6c1b86SVineet Gupta 	pmd_huge_pte(mm, pmdp) = pgtable;
655fe6c1b86SVineet Gupta }
656fe6c1b86SVineet Gupta 
657fe6c1b86SVineet Gupta pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
658fe6c1b86SVineet Gupta {
659fe6c1b86SVineet Gupta 	struct list_head *lh;
660fe6c1b86SVineet Gupta 	pgtable_t pgtable;
661fe6c1b86SVineet Gupta 
662fe6c1b86SVineet Gupta 	assert_spin_locked(&mm->page_table_lock);
663fe6c1b86SVineet Gupta 
664fe6c1b86SVineet Gupta 	pgtable = pmd_huge_pte(mm, pmdp);
665fe6c1b86SVineet Gupta 	lh = (struct list_head *) pgtable;
666fe6c1b86SVineet Gupta 	if (list_empty(lh))
667fe6c1b86SVineet Gupta 		pmd_huge_pte(mm, pmdp) = NULL;
668fe6c1b86SVineet Gupta 	else {
669fe6c1b86SVineet Gupta 		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
670fe6c1b86SVineet Gupta 		list_del(lh);
671fe6c1b86SVineet Gupta 	}
672fe6c1b86SVineet Gupta 
673fe6c1b86SVineet Gupta 	pte_val(pgtable[0]) = 0;
674fe6c1b86SVineet Gupta 	pte_val(pgtable[1]) = 0;
675fe6c1b86SVineet Gupta 
676fe6c1b86SVineet Gupta 	return pgtable;
677fe6c1b86SVineet Gupta }
678fe6c1b86SVineet Gupta 
679c7119d56SVineet Gupta void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
680722fe8fdSVineet Gupta 			       unsigned long end)
681722fe8fdSVineet Gupta {
682722fe8fdSVineet Gupta 	unsigned int cpu;
683722fe8fdSVineet Gupta 	unsigned long flags;
684722fe8fdSVineet Gupta 
685722fe8fdSVineet Gupta 	local_irq_save(flags);
686722fe8fdSVineet Gupta 
687722fe8fdSVineet Gupta 	cpu = smp_processor_id();
688722fe8fdSVineet Gupta 
689722fe8fdSVineet Gupta 	if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
690722fe8fdSVineet Gupta 		unsigned int asid = hw_pid(vma->vm_mm, cpu);
691722fe8fdSVineet Gupta 
692722fe8fdSVineet Gupta 		/* No need to loop here: this will always be for 1 Huge Page */
693722fe8fdSVineet Gupta 		tlb_entry_erase(start | _PAGE_HW_SZ | asid);
694722fe8fdSVineet Gupta 	}
695722fe8fdSVineet Gupta 
696722fe8fdSVineet Gupta 	local_irq_restore(flags);
697722fe8fdSVineet Gupta }
698722fe8fdSVineet Gupta 
699fe6c1b86SVineet Gupta #endif
700fe6c1b86SVineet Gupta 
7015f840df5SFlavio Suligoi /* Read the MMU Build Configuration Register, decode it and save into
702cc562d2eSVineet Gupta  * the cpuinfo structure for later use.
703cc562d2eSVineet Gupta  * No validation is done here, simply read/convert the BCR
704cc562d2eSVineet Gupta  */
705ce759956SPaul Gortmaker void read_decode_mmu_bcr(void)
706cc562d2eSVineet Gupta {
707cc562d2eSVineet Gupta 	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
708da1677b0SVineet Gupta 	unsigned int tmp;
709da1677b0SVineet Gupta 	struct bcr_mmu_1_2 {
710da1677b0SVineet Gupta #ifdef CONFIG_CPU_BIG_ENDIAN
711da1677b0SVineet Gupta 		unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
712da1677b0SVineet Gupta #else
713da1677b0SVineet Gupta 		unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
714da1677b0SVineet Gupta #endif
715da1677b0SVineet Gupta 	} *mmu2;
716da1677b0SVineet Gupta 
717da1677b0SVineet Gupta 	struct bcr_mmu_3 {
718da1677b0SVineet Gupta #ifdef CONFIG_CPU_BIG_ENDIAN
719d0890ea5SVineet Gupta 	unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
720da1677b0SVineet Gupta 		     u_itlb:4, u_dtlb:4;
721da1677b0SVineet Gupta #else
722d0890ea5SVineet Gupta 	unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
723da1677b0SVineet Gupta 		     ways:4, ver:8;
724da1677b0SVineet Gupta #endif
725da1677b0SVineet Gupta 	} *mmu3;
726cc562d2eSVineet Gupta 
727d7a512bfSVineet Gupta 	struct bcr_mmu_4 {
728d7a512bfSVineet Gupta #ifdef CONFIG_CPU_BIG_ENDIAN
729d7a512bfSVineet Gupta 	unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
730d7a512bfSVineet Gupta 		     n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
731d7a512bfSVineet Gupta #else
732d7a512bfSVineet Gupta 	/*           DTLB      ITLB      JES        JE         JA      */
733d7a512bfSVineet Gupta 	unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
734d7a512bfSVineet Gupta 		     pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
735d7a512bfSVineet Gupta #endif
736d7a512bfSVineet Gupta 	} *mmu4;
737d7a512bfSVineet Gupta 
738cc562d2eSVineet Gupta 	tmp = read_aux_reg(ARC_REG_MMU_BCR);
739cc562d2eSVineet Gupta 	mmu->ver = (tmp >> 24);
740cc562d2eSVineet Gupta 
74192d44128SVineet Gupta 	if (is_isa_arcompact()) {
742cc562d2eSVineet Gupta 		if (mmu->ver <= 2) {
743cc562d2eSVineet Gupta 			mmu2 = (struct bcr_mmu_1_2 *)&tmp;
744d0890ea5SVineet Gupta 			mmu->pg_sz_k = TO_KB(0x2000);
745cc562d2eSVineet Gupta 			mmu->sets = 1 << mmu2->sets;
746cc562d2eSVineet Gupta 			mmu->ways = 1 << mmu2->ways;
747cc562d2eSVineet Gupta 			mmu->u_dtlb = mmu2->u_dtlb;
748cc562d2eSVineet Gupta 			mmu->u_itlb = mmu2->u_itlb;
74992d44128SVineet Gupta 		} else {
750cc562d2eSVineet Gupta 			mmu3 = (struct bcr_mmu_3 *)&tmp;
75140b552d9SVineet Gupta 			mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
752cc562d2eSVineet Gupta 			mmu->sets = 1 << mmu3->sets;
753cc562d2eSVineet Gupta 			mmu->ways = 1 << mmu3->ways;
754cc562d2eSVineet Gupta 			mmu->u_dtlb = mmu3->u_dtlb;
755cc562d2eSVineet Gupta 			mmu->u_itlb = mmu3->u_itlb;
756d0890ea5SVineet Gupta 			mmu->sasid = mmu3->sasid;
75792d44128SVineet Gupta 		}
758d7a512bfSVineet Gupta 	} else {
759d7a512bfSVineet Gupta 		mmu4 = (struct bcr_mmu_4 *)&tmp;
760d7a512bfSVineet Gupta 		mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
761d7a512bfSVineet Gupta 		mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
762d7a512bfSVineet Gupta 		mmu->sets = 64 << mmu4->n_entry;
763d7a512bfSVineet Gupta 		mmu->ways = mmu4->n_ways * 2;
764d7a512bfSVineet Gupta 		mmu->u_dtlb = mmu4->u_dtlb * 4;
765d7a512bfSVineet Gupta 		mmu->u_itlb = mmu4->u_itlb * 4;
766d0890ea5SVineet Gupta 		mmu->sasid = mmu4->sasid;
767b5ddb6d5SVineet Gupta 		pae_exists = mmu->pae = mmu4->pae;
768cc562d2eSVineet Gupta 	}
769cc562d2eSVineet Gupta }
770cc562d2eSVineet Gupta 
771af617428SVineet Gupta char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
772af617428SVineet Gupta {
773af617428SVineet Gupta 	int n = 0;
774e3edeb67SNoam Camus 	struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
775d7a512bfSVineet Gupta 	char super_pg[64] = "";
776d7a512bfSVineet Gupta 
777d7a512bfSVineet Gupta 	if (p_mmu->s_pg_sz_m)
778d7c46114SVineet Gupta 		scnprintf(super_pg, 64, "%dM Super Page %s",
7796ce18798SVineet Gupta 			  p_mmu->s_pg_sz_m,
780964cf28fSVineet Gupta 			  IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
781af617428SVineet Gupta 
782af617428SVineet Gupta 	n += scnprintf(buf + n, len - n,
7835a364c2aSVineet Gupta 		      "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
784d7a512bfSVineet Gupta 		       p_mmu->ver, p_mmu->pg_sz_k, super_pg,
785b598e17fSVineet Gupta 		       p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
7865a364c2aSVineet Gupta 		       p_mmu->u_dtlb, p_mmu->u_itlb,
787d7c46114SVineet Gupta 		       IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
788af617428SVineet Gupta 
789af617428SVineet Gupta 	return buf;
790af617428SVineet Gupta }
791af617428SVineet Gupta 
792b5ddb6d5SVineet Gupta int pae40_exist_but_not_enab(void)
793b5ddb6d5SVineet Gupta {
794b5ddb6d5SVineet Gupta 	return pae_exists && !is_pae40_enabled();
795b5ddb6d5SVineet Gupta }
796b5ddb6d5SVineet Gupta 
797ce759956SPaul Gortmaker void arc_mmu_init(void)
798cc562d2eSVineet Gupta {
799af617428SVineet Gupta 	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
80092d44128SVineet Gupta 	char str[256];
80192d44128SVineet Gupta 	int compat = 0;
802af617428SVineet Gupta 
80318ee4becSNoam Camus 	pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));
804af617428SVineet Gupta 
80515ca68a9SNoam Camus 	/*
8065f840df5SFlavio Suligoi 	 * Can't be done in processor.h due to header include dependencies
80715ca68a9SNoam Camus 	 */
80815ca68a9SNoam Camus 	BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));
80915ca68a9SNoam Camus 
8108bcf2c48SNoam Camus 	/*
8118bcf2c48SNoam Camus 	 * stack top size sanity check,
8125f840df5SFlavio Suligoi 	 * Can't be done in processor.h due to header include dependencies
8138bcf2c48SNoam Camus 	 */
8148bcf2c48SNoam Camus 	BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));
8158bcf2c48SNoam Camus 
81692d44128SVineet Gupta 	/*
81792d44128SVineet Gupta 	 * Ensure that MMU features assumed by kernel exist in hardware.
81892d44128SVineet Gupta 	 * For older ARC700 cpus, it has to be an exact match, since the MMU
81992d44128SVineet Gupta 	 * revisions were not backwards compatible (MMUv3 TLB layout changed,
82092d44128SVineet Gupta 	 * so even if a kernel for v2 didn't use any new cmds of v3, it would
82192d44128SVineet Gupta 	 * still not work).
82292d44128SVineet Gupta 	 * For HS cpus, MMUv4 was baseline and v5 is backwards compatible
82392d44128SVineet Gupta 	 * (will run older software).
824af617428SVineet Gupta 	 */
82592d44128SVineet Gupta 	if (is_isa_arcompact() && mmu->ver == CONFIG_ARC_MMU_VER)
82692d44128SVineet Gupta 		compat = 1;
82792d44128SVineet Gupta 	else if (is_isa_arcv2() && mmu->ver >= CONFIG_ARC_MMU_VER)
82892d44128SVineet Gupta 		compat = 1;
82992d44128SVineet Gupta 
83092d44128SVineet Gupta 	if (!compat) {
831af617428SVineet Gupta 		panic("MMU ver %d doesn't match kernel built for %d...\n",
832af617428SVineet Gupta 		      mmu->ver, CONFIG_ARC_MMU_VER);
833af617428SVineet Gupta 	}
834af617428SVineet Gupta 
83540b552d9SVineet Gupta 	if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
836af617428SVineet Gupta 		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
837af617428SVineet Gupta 
8386ce18798SVineet Gupta 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
8396ce18798SVineet Gupta 	    mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
8406ce18798SVineet Gupta 		panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
8416ce18798SVineet Gupta 		      (unsigned long)TO_MB(HPAGE_PMD_SIZE));
8426ce18798SVineet Gupta 
8435a364c2aSVineet Gupta 	if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
8445a364c2aSVineet Gupta 		panic("Hardware doesn't support PAE40\n");
8455a364c2aSVineet Gupta 
846cc562d2eSVineet Gupta 	/* Enable the MMU */
847cc562d2eSVineet Gupta 	write_aux_reg(ARC_REG_PID, MMU_ENABLE);
84841195d23SVineet Gupta 
84941195d23SVineet Gupta 	/* In smp we use this reg for interrupt 1 scratch */
850cfd9d70aSVineet Gupta #ifdef ARC_USE_SCRATCH_REG
85141195d23SVineet Gupta 	/* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
85241195d23SVineet Gupta 	write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
85341195d23SVineet Gupta #endif
854b5ddb6d5SVineet Gupta 
855b5ddb6d5SVineet Gupta 	if (pae40_exist_but_not_enab())
856b5ddb6d5SVineet Gupta 		write_aux_reg(ARC_REG_TLBPD1HI, 0);
857cc562d2eSVineet Gupta }
858cc562d2eSVineet Gupta 
859cc562d2eSVineet Gupta /*
860cc562d2eSVineet Gupta  * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
861cc562d2eSVineet Gupta  * The mapping is Column-first.
862cc562d2eSVineet Gupta  *		---------------------	-----------
863cc562d2eSVineet Gupta  *		|way0|way1|way2|way3|	|way0|way1|
864cc562d2eSVineet Gupta  *		---------------------	-----------
865cc562d2eSVineet Gupta  * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
866cc562d2eSVineet Gupta  * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
867cc562d2eSVineet Gupta  *		~		    ~	~	  ~
868cc562d2eSVineet Gupta  * [set127]	| 508| 509| 510| 511|	| 254| 255|
869cc562d2eSVineet Gupta  *		---------------------	-----------
870cc562d2eSVineet Gupta  * For normal operations we don't (must not) care how the above works since
871cc562d2eSVineet Gupta  * MMU cmd getIndex(vaddr) abstracts that out.
872cc562d2eSVineet Gupta  * However for walking WAYS of a SET, we need to know this
873cc562d2eSVineet Gupta  */
874cc562d2eSVineet Gupta #define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))
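/*
 * Editorial example against the 4-way column of the table above:
 * SET_WAY_TO_IDX(mmu, 1, 2) = 1 * 4 + 2 = 6, i.e. way2 of [set1].
 */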
875cc562d2eSVineet Gupta 
876cc562d2eSVineet Gupta /* Handling of Duplicate PD (TLB entry) in MMU.
877cc562d2eSVineet Gupta  * -Could be due to buggy customer tapeouts or obscure kernel bugs
878cc562d2eSVineet Gupta  * -MMU complains not at the time of duplicate PD installation, but at the
879cc562d2eSVineet Gupta  *      time of lookup matching multiple ways.
880cc562d2eSVineet Gupta  * -Ideally these should never happen - but if they do - workaround by deleting
881cc562d2eSVineet Gupta  *      the duplicate one.
882cc562d2eSVineet Gupta  * -Knob to be verbose about it. (TODO: hook it up to debugfs)
883cc562d2eSVineet Gupta  */
8845f840df5SFlavio Suligoi volatile int dup_pd_silent; /* Be silent about it or complain (default) */
885cc562d2eSVineet Gupta 
886cc562d2eSVineet Gupta void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
887cc562d2eSVineet Gupta 			  struct pt_regs *regs)
888cc562d2eSVineet Gupta {
889cc562d2eSVineet Gupta 	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
8908840e14cSVineet Gupta 	unsigned long flags;
89189c92142SVineet Gupta 	int set, n_ways = mmu->ways;
89289c92142SVineet Gupta 
89389c92142SVineet Gupta 	n_ways = min(n_ways, 4);
89489c92142SVineet Gupta 	BUG_ON(mmu->ways > 4);
895cc562d2eSVineet Gupta 
896cc562d2eSVineet Gupta 	local_irq_save(flags);
897cc562d2eSVineet Gupta 
898cc562d2eSVineet Gupta 	/* loop thru all sets of TLB */
899cc562d2eSVineet Gupta 	for (set = 0; set < mmu->sets; set++) {
900cc562d2eSVineet Gupta 
9018840e14cSVineet Gupta 		int is_valid, way;
90289c92142SVineet Gupta 		unsigned int pd0[4];
9038840e14cSVineet Gupta 
904cc562d2eSVineet Gupta 		/* read out all the ways of current set */
90589c92142SVineet Gupta 		for (way = 0, is_valid = 0; way < n_ways; way++) {
906cc562d2eSVineet Gupta 			write_aux_reg(ARC_REG_TLBINDEX,
907cc562d2eSVineet Gupta 					  SET_WAY_TO_IDX(mmu, set, way));
908cc562d2eSVineet Gupta 			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
909cc562d2eSVineet Gupta 			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
910cc562d2eSVineet Gupta 			is_valid |= pd0[way] & _PAGE_PRESENT;
9118840e14cSVineet Gupta 			pd0[way] &= PAGE_MASK;
912cc562d2eSVineet Gupta 		}
913cc562d2eSVineet Gupta 
914cc562d2eSVineet Gupta 		/* If all the WAYS in SET are empty, skip to next SET */
915cc562d2eSVineet Gupta 		if (!is_valid)
916cc562d2eSVineet Gupta 			continue;
917cc562d2eSVineet Gupta 
918cc562d2eSVineet Gupta 		/* Scan the set for duplicate ways: needs a nested loop */
91989c92142SVineet Gupta 		for (way = 0; way < n_ways - 1; way++) {
9208840e14cSVineet Gupta 
9218840e14cSVineet Gupta 			int n;
9228840e14cSVineet Gupta 
923cc562d2eSVineet Gupta 			if (!pd0[way])
924cc562d2eSVineet Gupta 				continue;
925cc562d2eSVineet Gupta 
92689c92142SVineet Gupta 			for (n = way + 1; n < n_ways; n++) {
9278840e14cSVineet Gupta 				if (pd0[way] != pd0[n])
9288840e14cSVineet Gupta 					continue;
929cc562d2eSVineet Gupta 
9308840e14cSVineet Gupta 				if (!dup_pd_silent)
9318840e14cSVineet Gupta 					pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
9328840e14cSVineet Gupta 						pd0[way], set, way, n);
933cc562d2eSVineet Gupta 
934cc562d2eSVineet Gupta 				/*
9358840e14cSVineet Gupta 				 * clear entry @way and not @n.
9368840e14cSVineet Gupta 				 * This is critical to our optimised loop
937cc562d2eSVineet Gupta 				 */
9388840e14cSVineet Gupta 				pd0[way] = 0;
939cc562d2eSVineet Gupta 				write_aux_reg(ARC_REG_TLBINDEX,
940cc562d2eSVineet Gupta 						SET_WAY_TO_IDX(mmu, set, way));
941cc562d2eSVineet Gupta 				__tlb_entry_erase();
942cc562d2eSVineet Gupta 			}
943cc562d2eSVineet Gupta 		}
944cc562d2eSVineet Gupta 	}
945cc562d2eSVineet Gupta 
946cc562d2eSVineet Gupta 	local_irq_restore(flags);
947cc562d2eSVineet Gupta }
948cc562d2eSVineet Gupta 
949cc562d2eSVineet Gupta /***********************************************************************
950cc562d2eSVineet Gupta  * Diagnostic Routines
9515f840df5SFlavio Suligoi  *  -Called from Low Level TLB Handlers if things don't look good
952cc562d2eSVineet Gupta  **********************************************************************/
953cc562d2eSVineet Gupta 
954cc562d2eSVineet Gupta #ifdef CONFIG_ARC_DBG_TLB_PARANOIA
955cc562d2eSVineet Gupta 
956cc562d2eSVineet Gupta /*
957cc562d2eSVineet Gupta  * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDS
958cc562d2eSVineet Gupta  * don't match
959cc562d2eSVineet Gupta  */
9605bd87adfSVineet Gupta void print_asid_mismatch(int mm_asid, int mmu_asid, int is_fast_path)
961cc562d2eSVineet Gupta {
962cc562d2eSVineet Gupta 	pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
9635bd87adfSVineet Gupta 	       is_fast_path ? "Fast" : "Slow", mm_asid, mmu_asid);
964cc562d2eSVineet Gupta 
965cc562d2eSVineet Gupta 	__asm__ __volatile__("flag 1");
966cc562d2eSVineet Gupta }
967cc562d2eSVineet Gupta 
9685bd87adfSVineet Gupta void tlb_paranoid_check(unsigned int mm_asid, unsigned long addr)
969cc562d2eSVineet Gupta {
9705bd87adfSVineet Gupta 	unsigned int mmu_asid;
971cc562d2eSVineet Gupta 
9725bd87adfSVineet Gupta 	mmu_asid = read_aux_reg(ARC_REG_PID) & 0xff;
973cc562d2eSVineet Gupta 
9745bd87adfSVineet Gupta 	/*
9755bd87adfSVineet Gupta 	 * At the time of a TLB miss/installation
9765bd87adfSVineet Gupta 	 *   - HW version needs to match SW version
9775bd87adfSVineet Gupta 	 *   - SW needs to have a valid ASID
9785bd87adfSVineet Gupta 	 */
9795bd87adfSVineet Gupta 	if (addr < 0x70000000 &&
980947bf103SVineet Gupta 	    ((mm_asid == MM_CTXT_NO_ASID) ||
981947bf103SVineet Gupta 	      (mmu_asid != (mm_asid & MM_CTXT_ASID_MASK))))
9825bd87adfSVineet Gupta 		print_asid_mismatch(mm_asid, mmu_asid, 0);
983cc562d2eSVineet Gupta }
984cc562d2eSVineet Gupta #endif
985