/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: May 2011
 *  -Refactored get_new_mmu_context() to only handle the live mm;
 *   the retiring mm is handled in other hooks
 *
 * Vineetg: March 25th, 2008: Bug #92690
 *  -Major rewrite of the core ASID allocation routine get_new_mmu_context()
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_MMU_CONTEXT_H
#define _ASM_ARC_MMU_CONTEXT_H

#include <linux/sched/mm.h>

#include <asm/tlb.h>
#include <asm-generic/mm_hooks.h>
/*		ARC ASID Management
 *
 * The MMU tags TLB entries with an 8-bit ASID, avoiding the need to flush
 * the TLB on every context switch.
 *
 * ASIDs are managed per CPU, so threads of a task running on different CPUs
 * can have different ASIDs. Global ASID management would only be needed if
 * the hardware supported TLB shootdown and/or a TLB shared across cores,
 * which ARC doesn't.
 *
 * Each task is assigned a unique ASID by a simple round-robin allocator
 * tracked in @asid_cpu. When the 8-bit value rolls over, a new cycle is
 * started over from 0 and the TLB is flushed.
 *
 * A new allocation cycle, post rollover, could potentially reassign an ASID
 * to a different task. Thus the rule is to refresh the ASID in a new cycle.
 * The 32-bit @asid_cpu (and mm->context.asid) holds the 8-bit MMU PID in the
 * low bits while the remaining 24 bits serve as a cycle/generation indicator;
 * natural 32-bit unsigned math automagically increments the generation when
 * the lower 8 bits roll over.
 */
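
/*
 * Worked example (illustrative values, not from the original code): if
 * asid_cpu(cpu) == 0x0000_0304, the generation is 0x0000_0300 and the
 * hardware PID programmed into the MMU is 0x04. Incrementing past
 * 0x0000_03ff yields 0x0000_0400: the low 8 bits are 0, so the allocator
 * flushes the TLB and hands out ASIDs of generation 0x0000_0400 afresh.
 */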

#define MM_CTXT_ASID_MASK	0x000000ff /* MMU PID reg: 8 bit PID */
#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)

#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
#define MM_CTXT_NO_ASID		0UL

#define asid_mm(mm, cpu)	mm->context.asid[cpu]
#define hw_pid(mm, cpu)		(asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)

DECLARE_PER_CPU(unsigned int, asid_cache);
#define asid_cpu(cpu)		per_cpu(asid_cache, cpu)

/*
 * Get a new ASID if the task doesn't have a valid one (unallocated or from a
 * previous cycle). Also set the MMU PID register to the existing/updated ASID.
 */
static inline void get_new_mmu_context(struct mm_struct *mm)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * Move to a new ASID if it was not from the current
	 * alloc-cycle/generation. This is detected by checking that the
	 * generation bits in both mm's ASID and the cpu's ASID counter are
	 * exactly the same.
	 *
	 * Note: Callers needing a new ASID unconditionally, independent of
	 *	 generation, e.g. local_flush_tlb_mm() for a forking parent,
	 *	 first need to destroy the context, setting it to an invalid
	 *	 value.
	 */
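	/*
	 * Illustration with assumed values: mm ASID 0x0000_01f3 vs. cpu
	 * counter 0x0000_0204 -> XOR is 0x0000_03f7, which ANDed with
	 * MM_CTXT_CYCLE_MASK leaves 0x0000_0300 (non-zero): the generations
	 * differ, so the fast path below is skipped and a new ASID is taken.
	 */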
	if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))
		goto set_hw;

	/* move to new ASID and handle rollover */
	if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {

		local_flush_tlb_all();

		/*
		 * The check above caught rollover of the 8-bit ASID in its
		 * 32-bit container. If the container itself wrapped around,
		 * set it to a non-zero "generation" to distinguish it from
		 * no context.
		 */
		if (!asid_cpu(cpu))
			asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE;
	}

	/* Assign new ASID to tsk */
	asid_mm(mm, cpu) = asid_cpu(cpu);

set_hw:
	mmu_setup_asid(mm, hw_pid(mm, cpu));

	local_irq_restore(flags);
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for_each_possible_cpu(i)
		asid_mm(mm, i) = MM_CTXT_NO_ASID;

	return 0;
}

#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;

	/* Needed to elide CONFIG_DEBUG_PREEMPT warning */
	local_irq_save(flags);
	asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID;
	local_irq_restore(flags);
}
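
/*
 * Hedged sketch, not part of the original header: per the note in
 * get_new_mmu_context(), a caller wanting a fresh ASID unconditionally
 * (e.g. local_flush_tlb_mm() for a forking parent) first invalidates the
 * context so the generation check is guaranteed to miss. The helper name
 * below is hypothetical, purely for illustration.
 */
static inline void example_force_new_asid(struct mm_struct *mm)
{
	/* wipe this cpu's ASID so mm no longer matches any generation */
	destroy_context(mm);

	/* reallocate right away, but only if mm is still the live mm */
	if (current->mm == mm)
		get_new_mmu_context(mm);
}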

/*
 * Prepare the MMU for the task: set up the PID reg with the allocated ASID.
 * If the task doesn't have an ASID (never allocated, or stolen), get a new
 * ASID.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	const int cpu = smp_processor_id();

	/*
	 * Note that the mm_cpumask is "aggregating" only; we don't clear it
	 * for the switched-out task, unlike some other arches.
	 * It is used to enlist cpus for sending TLB flush IPIs, and not
	 * sending the IPI to CPUs where a task once ran could cause stale
	 * TLB entry re-use, especially for a multi-threaded task.
	 * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps.
	 *      With a non-aggregating mm_cpumask, the IPI is not sent to C1,
	 *      and if T1 were to re-migrate to C1, it could access the
	 *      unmapped region via any existing stale TLB entries.
	 */
	cpumask_set_cpu(cpu, mm_cpumask(next));

	mmu_setup_pgd(next, next->pgd);

	get_new_mmu_context(next);
}

/*
 * activate_mm defaults (in asm-generic) to switch_mm and is called at the
 * time of execve() to get a new ASID. Note the subtlety here:
 * get_new_mmu_context() behaves differently here vs. in switch_mm(). Here
 * it always assigns a new ASID, because the mm starts out with an
 * unallocated "initial" value, while in the latter it moves to a new ASID
 * only if the existing one was unallocated or from a previous generation.
 */
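
/*
 * For reference, the asm-generic fallback picked up via the include at the
 * bottom of this file is roughly the following (shown for illustration;
 * see asm-generic/mmu_context.h for the authoritative version):
 *
 *	static inline void activate_mm(struct mm_struct *prev_mm,
 *				       struct mm_struct *next_mm)
 *	{
 *		switch_mm(prev_mm, next_mm, current);
 *	}
 *
 * i.e. execve() ends up in switch_mm() above with a virgin mm, so
 * get_new_mmu_context() always hands it a fresh ASID.
 */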

/*
 * It seemed that deactivate_mm() is a reasonable place to do book-keeping
 * for the retiring mm. However destroy_context() still needs to do that
 * because between mm_release() => deactivate_mm() and
 * mmput() => .. => __mmdrop() => destroy_context()
 * there is a good chance that the task gets sched-out/in, making its ASID
 * valid again (this teased me for a whole day).
 */

#include <asm-generic/mmu_context.h>

#endif /* _ASM_ARC_MMU_CONTEXT_H */