xref: /openbmc/linux/arch/x86/kernel/ldt.c (revision 9d64fc08)
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>

static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
	unsigned short sel;

	/*
	 * Make sure that the cached DS and ES descriptors match the updated
	 * LDT.
	 */
	savesegment(ds, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(ds, sel);

	savesegment(es, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(es, sel);
#endif
}
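
/*
 * Background: an x86 segment selector packs a 13-bit descriptor index in
 * bits 15..3, the table indicator in bit 2 (0 = GDT, 1 = LDT) and the RPL
 * in bits 1..0.  SEGMENT_TI_MASK isolates bit 2, so refresh_ldt_segments()
 * only reloads a register whose selector points into the LDT; the reload
 * forces the CPU to refetch the (possibly changed) descriptor into its
 * hidden segment-register cache.
 */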

/* context.lock is held for us, so we don't need any locking. */
static void flush_ldt(void *__mm)
{
	struct mm_struct *mm = __mm;
	mm_context_t *pc;

	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
		return;

	pc = &mm->context;
	set_ldt(pc->ldt->entries, pc->ldt->nr_entries);

	refresh_ldt_segments();
}

/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
	struct ldt_struct *new_ldt;
	unsigned int alloc_size;

	if (num_entries > LDT_ENTRIES)
		return NULL;

	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
	if (!new_ldt)
		return NULL;

	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
	alloc_size = num_entries * LDT_ENTRY_SIZE;

	/*
	 * Xen is very picky: it requires a page-aligned LDT that has no
	 * trailing nonzero bytes in any page that contains LDT descriptors.
	 * Keep it simple: zero the whole allocation and never allocate less
	 * than PAGE_SIZE.
	 */
	if (alloc_size > PAGE_SIZE)
		new_ldt->entries = vzalloc(alloc_size);
	else
		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

	if (!new_ldt->entries) {
		kfree(new_ldt);
		return NULL;
	}

	new_ldt->nr_entries = num_entries;
	return new_ldt;
}
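
/*
 * Sizing example: LDT_ENTRY_SIZE is 8 bytes, so with 4 KiB pages the
 * get_zeroed_page() path above covers tables of up to 512 entries; only
 * larger tables (up to LDT_ENTRIES == 8192, i.e. 64 KiB) take the
 * vzalloc() path.
 */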

/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}

/* context.lock is held */
static void install_ldt(struct mm_struct *current_mm,
			struct ldt_struct *ldt)
{
	/* Synchronizes with lockless_dereference in load_mm_ldt. */
	smp_store_release(&current_mm->context.ldt, ldt);

	/* Activate the LDT for all CPUs using current_mm. */
	on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
}
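
/*
 * The release in install_ldt() pairs with the lockless_dereference() that
 * load_mm_ldt() performs on context.ldt: a CPU that observes the new
 * pointer is guaranteed to also observe the fully initialized entries
 * written before the store, without taking context.lock on the read side.
 */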

static void free_ldt_struct(struct ldt_struct *ldt)
{
	if (likely(!ldt))
		return;

	paravirt_free_ldt(ldt->entries, ldt->nr_entries);
	if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
		vfree_atomic(ldt->entries);
	else
		free_page((unsigned long)ldt->entries);
	kfree(ldt);
}

/*
 * We don't have to muck with descriptors here; that is
 * done in switch_mm() as needed.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
{
	struct ldt_struct *new_ldt;
	struct mm_struct *old_mm;
	int retval = 0;

	mutex_init(&mm->context.lock);
	old_mm = current->mm;
	if (!old_mm) {
		mm->context.ldt = NULL;
		return 0;
	}

	mutex_lock(&old_mm->context.lock);
	if (!old_mm->context.ldt) {
		mm->context.ldt = NULL;
		goto out_unlock;
	}

	new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
	if (!new_ldt) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
	       new_ldt->nr_entries * LDT_ENTRY_SIZE);
	finalize_ldt_struct(new_ldt);

	mm->context.ldt = new_ldt;

out_unlock:
	mutex_unlock(&old_mm->context.lock);
	return retval;
}

/*
 * No need to lock the MM as we are the last user
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
	free_ldt_struct(mm->context.ldt);
	mm->context.ldt = NULL;
}

static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	struct mm_struct *mm = current->mm;
	unsigned long entries_size;
	int retval;

	mutex_lock(&mm->context.lock);

	if (!mm->context.ldt) {
		retval = 0;
		goto out_unlock;
	}

	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

	entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
	if (entries_size > bytecount)
		entries_size = bytecount;

	if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
		retval = -EFAULT;
		goto out_unlock;
	}

	if (entries_size != bytecount) {
		/* Zero-fill the rest and pretend we read bytecount bytes. */
		if (clear_user(ptr + entries_size, bytecount - entries_size)) {
			retval = -EFAULT;
			goto out_unlock;
		}
	}
	retval = bytecount;

out_unlock:
	mutex_unlock(&mm->context.lock);
	return retval;
}
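
/*
 * Worked example of the semantics above: with a two-entry LDT (16 bytes)
 * and bytecount == 32, the first 16 bytes of the user buffer receive the
 * entries, the next 16 are zero-filled, and the return value is 32; the
 * caller cannot distinguish a short table from trailing empty descriptors.
 */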

static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	/* CHECKME: Can we use _one_ random number? */
#ifdef CONFIG_X86_32
	unsigned long size = 5 * sizeof(struct desc_struct);
#else
	unsigned long size = 128;
#endif
	if (bytecount > size)
		bytecount = size;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}

static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct ldt_struct *new_ldt, *old_ldt;
	unsigned int old_nr_entries, new_nr_entries;
	struct user_desc ldt_info;
	struct desc_struct ldt;
	int error;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
	    LDT_empty(&ldt_info)) {
		/* The user wants to clear the entry. */
		memset(&ldt, 0, sizeof(ldt));
	} else {
		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
			error = -EINVAL;
			goto out;
		}

		fill_ldt(&ldt, &ldt_info);
		if (oldmode)
			ldt.avl = 0;
	}

	mutex_lock(&mm->context.lock);

	old_ldt       = mm->context.ldt;
	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
	new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

	error = -ENOMEM;
	new_ldt = alloc_ldt_struct(new_nr_entries);
	if (!new_ldt)
		goto out_unlock;

	if (old_ldt)
		memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

	new_ldt->entries[ldt_info.entry_number] = ldt;
	finalize_ldt_struct(new_ldt);

	install_ldt(mm, new_ldt);
	free_ldt_struct(old_ldt);
	error = 0;

out_unlock:
	mutex_unlock(&mm->context.lock);
out:
	return error;
}
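
/*
 * Design note: write_ldt() never patches a live table in place.  It
 * allocates a fresh copy sized for the highest entry, copies the old
 * contents, updates the one entry, publishes the new pointer via
 * install_ldt() and only then frees the old table, so concurrent readers
 * of context.ldt always see a consistent snapshot.
 */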

asmlinkage int sys_modify_ldt(int func, void __user *ptr,
			      unsigned long bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	return ret;
}
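
/*
 * Usage sketch (illustration only, not part of this file): a minimal
 * x86 user-space caller of the syscall above.  Per the dispatch in
 * sys_modify_ldt(), func 0x11 reaches write_ldt() with oldmode == 0 and
 * func 0 reaches read_ldt().  Assumes the UAPI <asm/ldt.h> (struct
 * user_desc, LDT_ENTRY_SIZE) and that modify_ldt has no libc wrapper,
 * hence the raw syscall(2).
 */
#define _GNU_SOURCE
#include <asm/ldt.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct user_desc desc;
	unsigned char buf[4 * LDT_ENTRY_SIZE];
	long ret;

	/* Describe one 32-bit, 4 GiB flat data segment in LDT slot 0. */
	memset(&desc, 0, sizeof(desc));
	desc.entry_number   = 0;
	desc.base_addr      = 0;
	desc.limit          = 0xfffff;	/* 0xfffff pages == 4 GiB */
	desc.seg_32bit      = 1;
	desc.limit_in_pages = 1;

	/* bytecount must equal sizeof(struct user_desc) or write_ldt() */
	/* returns -EINVAL. */
	ret = syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
	if (ret)
		perror("modify_ldt(0x11)");

	/* Read the table back; bytes past nr_entries come back zeroed. */
	ret = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
	printf("read_ldt copied %ld bytes\n", ret);
	return 0;
}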