/*
 *  MMU context allocation for 64-bit kernels.
 *
 *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

static DEFINE_IDA(mmu_context_ida);

static int alloc_context_id(int min_id, int max_id)
{
	return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
}

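/*
 * Reserve a specific context id so that it can never be handed out by the
 * allocators above. Warn if the id is already in use or the allocation
 * fails, since the caller gets no return value to check.
 */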
void hash__reserve_context_id(int id)
{
	int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);

	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}

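/*
 * Allocate a user context id for the hash MMU. The maximum id depends on
 * whether the MMU supports 68-bit virtual addresses (MMU_FTR_68_BIT_VA).
 */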
int hash__alloc_context_id(void)
{
	unsigned long max;

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		max = MAX_USER_CONTEXT;
	else
		max = MAX_USER_CONTEXT_65BIT_VA;

	return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);

void slb_setup_new_exec(void);

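/*
 * Allocate a context id and the per-mm hash_mm_context for a new mm when
 * running with the hash MMU. Slice state is either initialised from scratch
 * (brand new mm) or inherited from the parent on fork, as described below.
 */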
static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	index = hash__alloc_context_id();
	if (index < 0)
		return index;

	mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
					   GFP_KERNEL);
	if (!mm->context.hash_context) {
		ida_free(&mmu_context_ida, index);
		return -ENOMEM;
	}

	/*
	 * The old code would re-promote on fork, we don't do that when using
	 * slices as it could cause problems promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which will
	 * have id == 0) and don't alter context slice inherited via fork (which
	 * will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm. Hence a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0) {
		memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
		slice_init_new_context_exec(mm);
	} else {
		/* This is fork. Copy hash_context details from current->mm */
		memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context));
#ifdef CONFIG_PPC_SUBPAGE_PROT
		/* inherit subpage prot details if we have one. */
		if (current->mm->context.hash_context->spt) {
			mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
								GFP_KERNEL);
			if (!mm->context.hash_context->spt) {
				ida_free(&mmu_context_ida, index);
				kfree(mm->context.hash_context);
				return -ENOMEM;
			}
		}
#endif
	}

	pkey_mm_init(mm);
	return index;
}

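/*
 * Called at exec time to re-initialise the slice and SLB state for the
 * newly exec'd image.
 */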
void hash__setup_new_exec(void)
{
	slice_setup_new_exec();

	slb_setup_new_exec();
}

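/*
 * Allocate a PID for a new mm under the radix MMU and install its process
 * table entry pointing at the new pgd.
 */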
static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index, max_id;

	max_id = (1 << mmu_pid_bits) - 1;
	index = alloc_context_id(mmu_base_pid, max_id);
	if (index < 0)
		return index;

	/*
	 * Set the process table entry.
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * Order the above store with subsequent update of the PID
	 * register (at which point HW can start loading/caching
	 * the entry) and the corresponding load by the MMU from
	 * the L2 cache.
	 */
	asm volatile("ptesync;isync" : : : "memory");

	mm->context.npu_context = NULL;
	mm->context.hash_context = NULL;

	return index;
}

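/*
 * Common entry point for setting up a new mm context: take the hash or
 * radix path, then initialise the generic parts of the context (page table
 * fragment pointers, IOMMU state and usage counters).
 */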
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;

	mm->context.pte_frag = NULL;
	mm->context.pmd_frag = NULL;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	atomic_set(&mm->context.active_cpus, 0);
	atomic_set(&mm->context.copros, 0);

	return 0;
}

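/*
 * Free a single context id back to the IDA.
 */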
void __destroy_context(int context_id)
{
	ida_free(&mmu_context_ida, context_id);
}
EXPORT_SYMBOL_GPL(__destroy_context);

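/*
 * Free all context ids held by an mm and release its hash_mm_context.
 */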
static void destroy_contexts(mm_context_t *ctx)
{
	int index, context_id;

	for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
		context_id = ctx->extended_id[index];
		if (context_id)
			ida_free(&mmu_context_ida, context_id);
	}
	kfree(ctx->hash_context);
}

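/*
 * Drop the references this mm still holds on a PMD fragment page and free
 * the page once the last fragment is gone.
 */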
static void pmd_frag_destroy(void *pmd_frag)
{
	int count;
	struct page *page;

	page = virt_to_page(pmd_frag);
	/* drop all the pending references */
	count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
	/* We allow PMD_FRAG_NR fragments from a PMD page */
	if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
		pgtable_pmd_page_dtor(page);
		__free_page(page);
	}
}

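/*
 * Release any partially used PTE/PMD fragment pages still cached in the mm
 * context.
 */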
static void destroy_pagetable_cache(struct mm_struct *mm)
{
	void *frag;

	frag = mm->context.pte_frag;
	if (frag)
		pte_frag_destroy(frag);

	frag = mm->context.pmd_frag;
	if (frag)
		pmd_frag_destroy(frag);
}

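/*
 * Final teardown of an mm context. By this point arch_exit_mmap() should
 * already have cleared the radix process table entry; on hash the subpage
 * protection table is freed here.
 */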
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
	if (radix_enabled())
		WARN_ON(process_tb[mm->context.id].prtb0 != 0);
	else
		subpage_prot_free(mm);
	destroy_contexts(&mm->context);
	mm->context.id = MMU_NO_CONTEXT;
}

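/*
 * Called while the address space is being torn down: free the page table
 * fragment caches and, on radix, clear the process table entry before the
 * final TLB flush in exit_mmap().
 */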
void arch_exit_mmap(struct mm_struct *mm)
{
	destroy_pagetable_cache(mm);

	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries. However we know that at least the P9 implementation
		 * will avoid caching an entry with an invalid RTS field,
		 * and 0 is invalid. So this will do.
		 *
		 * This runs before the "fullmm" tlb flush in exit_mmap,
		 * which does a RIC=2 tlbie to clear the process table
		 * entry. See the "fullmm" comments in tlb-radix.c.
		 *
		 * No barrier required here after the store because
		 * this process will do the invalidate, which starts with
		 * ptesync.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	}
}

#ifdef CONFIG_PPC_RADIX_MMU
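/*
 * Switch the hardware PID register to the next mm's context id; the isync
 * makes the update take effect before execution continues.
 */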
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	mtspr(SPRN_PID, next->context.id);
	isync();
}
#endif