xref: /openbmc/linux/arch/ia64/include/asm/mmu_context.h (revision 7f30491ccd28627742e37899453ae20e3da8e18f)
#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context
 * numbers are used to reduce or eliminate the need to perform TLB flushes
 * due to context switches.  Context numbers are implemented using IA-64
 * region ids.  Since the IA-64 TLB does not consider the region number when
 * performing a TLB lookup, we need to assign a unique region id to each
 * region in a process.  We use the least significant three bits in a region
 * id for this purpose.
 */

#define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | ((addr) >> 61))

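/*
 * Illustrative example (not part of the interface): a user address in
 * region 3, e.g. 0x6000000000000000, has (addr >> 61) == 3, so for a
 * task whose context number happens to be 5 (a made-up value)
 *
 *	ia64_rid(5, 0x6000000000000000UL) == (5 << 3) | 3 == 0x2b
 *
 * i.e. the context number occupies the upper bits of the region id and
 * the low three bits select one of the eight 2^61-byte regions.
 */
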
# include <asm/page.h>
# ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#include <asm-generic/mm_hooks.h>

struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* end of the free range: [next, limit) is free */
	unsigned int max_ctx;   /* max. context value supported by all CPUs */
				/* call wrap_mmu_context when next >= max */
	unsigned long *bitmap;  /* bitmap size is max_ctx+1 */
	unsigned long *flushmap;/* pending rid to be flushed */
};
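
/*
 * Informal note on the bookkeeping above (see arch/ia64/mm/tlb.c for the
 * other half): a set bit in "bitmap" marks a context number as in use,
 * so all numbers in [next, limit) are known to be free and can be handed
 * out without rescanning the bitmap.  "flushmap" collects numbers whose
 * stale TLB entries still need to be purged before they can be reused at
 * wrap time.
 */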

extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);

extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);

static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * When the context counter wraps around all TLBs need to be flushed because
 * an old context number might have been reused.  This is signalled by the
 * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
 * below.  The flag is raised for each CPU by wrap_mmu_context() while
 * holding ia64_ctx.lock, which is why it is re-checked here with that lock
 * held.  Called from get_mmu_context() on the activate_mm() path.
 * <efocht@ess.nec.de>
 */
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);
	unsigned long flags;

	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
			local_flush_tlb_all();
			__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
}

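/*
 * Allocate a context number for "mm" if it does not already have one.
 * Informal example of the free-range scan below (made-up numbers): if
 * bits 0..99 of ia64_ctx.bitmap are set, bits 100..149 are clear and bit
 * 150 is set, the scan leaves next == 100 and limit == 150; the next 50
 * allocations then simply hand out 100, 101, ... without rescanning.
 * Only when no free number below max_ctx is found does wrap_mmu_context()
 * recycle freed numbers and make every CPU flush its TLB (lazily, via
 * delayed_tlb_flush() above).
 */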
static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	nv_mm_context_t context = mm->context;

	if (likely(context))
		goto out;

	spin_lock_irqsave(&ia64_ctx.lock, flags);
	/* re-check, now that we've got the lock: */
	context = mm->context;
	if (context == 0) {
		cpus_clear(mm->cpu_vm_mask);
		if (ia64_ctx.next >= ia64_ctx.limit) {
			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			if (ia64_ctx.next >= ia64_ctx.max_ctx)
				wrap_mmu_context(mm);
		}
		mm->context = context = ia64_ctx.next++;
		__set_bit(context, ia64_ctx.bitmap);
	}
	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
out:
	/*
	 * Ensure we're not starting to use "context" before any old
	 * uses of it are gone from our TLB.
	 */
	delayed_tlb_flush();

	return context;
}

/*
 * Initialize context number to some sane value.  MM is guaranteed to be a
 * brand-new address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do.  */
}

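/*
 * Worked example for reload_context() below (informal; assumes the
 * common 16KB page size, PAGE_SHIFT == 14, which depends on the kernel
 * configuration, and ignores the CONFIG_HUGETLB_PAGE adjustment of rr4):
 * for context 5, rid = 5 << 3 = 0x28, and
 *
 *	rr0 = (0x28 << 8) | (14 << 2) | 1 = 0x2839
 *	rr1 = 0x2939, rr2 = 0x2a39, rr3 = 0x2b39, rr4 = 0x2c39
 *
 * i.e. the region-id field (bits 8 and up) takes the consecutive values
 * 0x28..0x2c for the five user regions, while the preferred-page-size
 * field and the VHPT-enable bit are the same in all of them.  Regions
 * 5-7 belong to the kernel and are not touched here.
 */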
static inline void
reload_context (nv_mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;

	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;

	/* encode the region id, preferred page size, and VHPT enable bit: */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
#ifdef  CONFIG_HUGETLB_PAGE
	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);

#  if RGN_HPAGE != 4
#    error "reload_context assumes RGN_HPAGE is 4"
#  endif
#endif

	ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}

/*
 * Must be called with preemption disabled: it uses smp_processor_id()
 * and loads the current CPU's region registers.
 */
static inline void
activate_context (struct mm_struct *mm)
{
	nv_mm_context_t context;

	do {
		context = get_mmu_context(mm);
		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
		reload_context(context);
		/*
		 * in the unlikely event of a TLB-flush by another thread,
		 * redo the load.
		 */
	} while (unlikely(context != mm->context));
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * We may get interrupts here, but that's OK because interrupt
	 * handlers cannot touch user-space.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}

#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)
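
/*
 * Usage note (informal): the generic scheduler invokes switch_mm() from
 * context_switch() and exec invokes activate_mm(); on ia64 both end up
 * in activate_mm() above, which records the new pgd in the
 * IA64_KR_PT_BASE kernel register and reloads the region registers via
 * activate_context().
 */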

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */