xref: /openbmc/linux/arch/nios2/mm/mmu_context.c (revision 71995e4d)
/*
 * MMU context handling.
 *
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/mm.h>

#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* The pid's position and mask in a context */
#define PID_SHIFT	0
#define PID_BITS	(cpuinfo.tlb_pid_num_bits)
#define PID_MASK	((1UL << PID_BITS) - 1)

/* The version's position and mask in a context */
#define VERSION_BITS	(32 - PID_BITS)
#define VERSION_SHIFT	(PID_SHIFT + PID_BITS)
#define VERSION_MASK	((1UL << VERSION_BITS) - 1)

/* Return the version part of a context */
#define CTX_VERSION(c)	(((c) >> VERSION_SHIFT) & VERSION_MASK)

/* Return the pid part of a context */
#define CTX_PID(c)	(((c) >> PID_SHIFT) & PID_MASK)

/* Value of the first context (version 1, pid 0) */
#define FIRST_CTX	((1UL << VERSION_SHIFT) | (0 << PID_SHIFT))

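/*
 * A context value packs the hardware tlb pid in its low PID_BITS bits and
 * a generation counter ("version") in the remaining upper bits.  For
 * example, assuming PID_BITS == 8, the context 0x301 decodes to version 3,
 * pid 1.
 */
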
static mm_context_t next_mmu_context;

/*
 * Initialize MMU context management.
 */
void __init mmu_context_init(void)
{
	/* We have to set this at runtime because FIRST_CTX depends on data
	 * from cpuinfo */
	next_mmu_context = FIRST_CTX;
}

/*
 * Set the new context (pid); the TLB way is kept unchanged.
 */
static void set_context(mm_context_t context)
{
	set_mmu_pid(CTX_PID(context));
}

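/*
 * Allocate and return the next context.  When the pid field wraps around,
 * the version is bumped and the caches/tlb are flushed so that stale
 * translations tagged with a recycled pid cannot be hit.
 */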
static mm_context_t get_new_context(void)
{
	/* Advance to the next pid */
	next_mmu_context += (1UL << PID_SHIFT);

	/* If the pid field wraps around, we increase the version and
	 * flush the tlb */
	if (unlikely(CTX_PID(next_mmu_context) == 0)) {
		/* The version has already been incremented, since the pid
		 * increment above overflowed into the version field */
		flush_cache_all();
		flush_tlb_all();
	}

	/* If the version wraps, we start over with the first generation; we
	 * do not need to flush the tlb here since it is always done above */
	if (unlikely(CTX_VERSION(next_mmu_context) == 0))
		next_mmu_context = FIRST_CTX;

	return next_mmu_context;
}

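/*
 * Switch the MMU to the incoming mm: make sure its context belongs to the
 * current generation, publish its pgd for the fast tlb handler and program
 * its pid into the MMU.
 */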
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);

	/* If the mm we are switching to has a context from a different
	 * generation than the current one, it must get a new generation/pid */
	if (unlikely(CTX_VERSION(next->context) !=
		CTX_VERSION(next_mmu_context)))
		next->context = get_new_context();

	/* Save the current pgd so the fast tlb handler can find it */
	pgd_current = next->pgd;

	/* Set the current context */
	set_context(next->context);

	local_irq_restore(flags);
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	next->context = get_new_context();
	set_context(next->context);
	pgd_current = next->pgd;
}

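/*
 * Return the pid portion of the given mm context.
 */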
unsigned long get_pid_from_context(mm_context_t *context)
{
	return CTX_PID((*context));
}
117