xref: /openbmc/linux/arch/x86/um/ldt.c (revision c819e2cf)
/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/unistd.h>
#include <os.h>
#include <proc_mm.h>
#include <skas.h>
#include <skas_ptrace.h>
#include <sysdep/tls.h>

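/*
 * Host-side modify_ldt(); ldt_get_host_info() uses it below to read the LDT
 * that this UML instance inherited from the host.
 */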
extern int modify_ldt(int func, void *ptr, unsigned long bytecount);

static long write_ldt_entry(struct mm_id *mm_idp, int func,
		     struct user_desc *desc, void **addr, int done)
{
	long res;

	if (proc_mm) {
		/*
		 * Special handling for the case where the mm to modify
		 * isn't current->active_mm.
		 * If this is called directly by modify_ldt,
		 *     (current->active_mm->context.skas.u == mm_idp)
		 * will be true, so no call to __switch_mm(mm_idp) is done.
		 * If this is called from init_new_ldt or PTRACE_LDT,
		 * mm_idp won't belong to current->active_mm, but to child->mm.
		 * So we need to switch the child's mm into our userspace,
		 * then switch back later.
		 *
		 * Note: I'm unsure: should interrupts be disabled here?
		 */
		if (!current->active_mm || current->active_mm == &init_mm ||
		    mm_idp != &current->active_mm->context.id)
			__switch_mm(mm_idp);
	}

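	/*
	 * Two mechanisms are available for changing the target LDT: with
	 * PTRACE_LDT the host kernel modifies it directly on our behalf;
	 * otherwise the write is performed by the syscall stub running in
	 * the child's address space.
	 */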
	if (ptrace_ldt) {
		struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
			.func = func,
			.ptr = desc,
			.bytecount = sizeof(*desc)};
		u32 cpu;
		int pid;

		if (!proc_mm)
			pid = mm_idp->u.pid;
		else {
			cpu = get_cpu();
			pid = userspace_pid[cpu];
		}

		res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);

		if (proc_mm)
			put_cpu();
	}
	else {
		void *stub_addr;
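		/*
		 * Copy the descriptor into the child's stub data area; the
		 * size is rounded up to a whole number of longs.
		 */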
		res = syscall_stub_data(mm_idp, (unsigned long *)desc,
					(sizeof(*desc) + sizeof(long) - 1) &
					    ~(sizeof(long) - 1),
					addr, &stub_addr);
		if (!res) {
			unsigned long args[] = { func,
						 (unsigned long)stub_addr,
						 sizeof(*desc),
						 0, 0, 0 };
			res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
					       0, addr, done);
		}
	}

	if (proc_mm) {
		/*
		 * This is the second part of the special handling above: if
		 * we switched to another mm, switch back to
		 * current->active_mm now (needed for the init_new_ldt and
		 * PTRACE_LDT cases).
		 */
		if (current->active_mm && current->active_mm != &init_mm &&
		    mm_idp != &current->active_mm->context.id)
			__switch_mm(&current->active_mm->context.id);
	}

	return res;
}

static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
{
	int res, n;
	struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
			.func = 0,
			.bytecount = bytecount,
			.ptr = kmalloc(bytecount, GFP_KERNEL)};
	u32 cpu;

	if (ptrace_ldt.ptr == NULL)
		return -ENOMEM;

	/*
	 * This is called from sys_modify_ldt only, so userspace_pid gives
	 * us the right number.
	 */

	cpu = get_cpu();
	res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
	put_cpu();
	if (res < 0)
		goto out;

	n = copy_to_user(ptr, ptrace_ldt.ptr, res);
	if (n != 0)
		res = -EFAULT;

out:
	kfree(ptrace_ldt.ptr);

	return res;
}

/*
 * In skas mode, we hold our own ldt data in UML.
 * Thus, the code implementing sys_modify_ldt_skas
 * is very similar to (and mostly stolen from) sys_modify_ldt
 * in arch/i386/kernel/ldt.c.
 * The routines copied and modified in part are:
 * - read_ldt
 * - read_default_ldt
 * - write_ldt
 * - sys_modify_ldt_skas
 */

static int read_ldt(void __user * ptr, unsigned long bytecount)
{
	int i, err = 0;
	unsigned long size;
	uml_ldt_t *ldt = &current->mm->context.arch.ldt;

	if (!ldt->entry_count)
		goto out;
	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
	err = bytecount;

	if (ptrace_ldt)
		return read_ldt_from_host(ptr, bytecount);

	mutex_lock(&ldt->lock);
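	/*
	 * Up to LDT_DIRECT_ENTRIES entries are stored inline in
	 * ldt->u.entries; a larger LDT is kept in whole pages hanging off
	 * ldt->u.pages[].
	 */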
	if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
		size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
		if (size > bytecount)
			size = bytecount;
		if (copy_to_user(ptr, ldt->u.entries, size))
			err = -EFAULT;
		bytecount -= size;
		ptr += size;
	}
	else {
		for (i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
		     i++) {
			size = PAGE_SIZE;
			if (size > bytecount)
				size = bytecount;
			if (copy_to_user(ptr, ldt->u.pages[i], size)) {
				err = -EFAULT;
				break;
			}
			bytecount -= size;
			ptr += size;
		}
	}
	mutex_unlock(&ldt->lock);

	if (bytecount == 0 || err == -EFAULT)
		goto out;

	if (clear_user(ptr, bytecount))
		err = -EFAULT;

out:
	return err;
}

static int read_default_ldt(void __user * ptr, unsigned long bytecount)
{
	int err;

	if (bytecount > 5*LDT_ENTRY_SIZE)
		bytecount = 5*LDT_ENTRY_SIZE;

	err = bytecount;
	/*
	 * UML doesn't support lcall7 and lcall27.
	 * So we don't really have a default ldt; instead we emulate an
	 * empty ldt of the common host default ldt size.
	 */
	if (clear_user(ptr, bytecount))
		err = -EFAULT;

	return err;
}

static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
{
	uml_ldt_t *ldt = &current->mm->context.arch.ldt;
	struct mm_id * mm_idp = &current->mm->context.id;
	int i, err;
	struct user_desc ldt_info;
	struct ldt_entry entry0, *ldt_p;
	void *addr = NULL;

	err = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	err = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	err = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (func == 1)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if (!ptrace_ldt)
		mutex_lock(&ldt->lock);

	err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
	if (err)
		goto out_unlock;
	else if (ptrace_ldt) {
		/* With PTRACE_LDT available, this is used as a flag only */
		ldt->entry_count = 1;
		goto out;
	}

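	/*
	 * Growing past LDT_DIRECT_ENTRIES switches the LDT storage from the
	 * inline u.entries array to full pages in u.pages[].  The two share
	 * a union, so installing the first page pointer overwrites the start
	 * of the inline array; entry 0 is therefore saved in entry0 first
	 * and copied into the new page together with the remaining inline
	 * entries.
	 */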
	if (ldt_info.entry_number >= ldt->entry_count &&
	    ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
		for (i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
		     i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
		     i++) {
			if (i == 0)
				memcpy(&entry0, ldt->u.entries,
				       sizeof(entry0));
			ldt->u.pages[i] = (struct ldt_entry *)
				__get_free_page(GFP_KERNEL|__GFP_ZERO);
			if (!ldt->u.pages[i]) {
				err = -ENOMEM;
				/* Undo the change in the host */
				memset(&ldt_info, 0, sizeof(ldt_info));
				write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
				goto out_unlock;
			}
			if (i == 0) {
				memcpy(ldt->u.pages[0], &entry0,
				       sizeof(entry0));
				memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
				       sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
			}
			ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
		}
	}
	if (ldt->entry_count <= ldt_info.entry_number)
		ldt->entry_count = ldt_info.entry_number + 1;

	if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
		ldt_p = ldt->u.entries + ldt_info.entry_number;
	else
		ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
			ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;

	if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
	   (func == 1 || LDT_empty(&ldt_info))) {
		ldt_p->a = 0;
		ldt_p->b = 0;
	}
	else {
		if (func == 1)
			ldt_info.useable = 0;
		ldt_p->a = LDT_entry_a(&ldt_info);
		ldt_p->b = LDT_entry_b(&ldt_info);
	}
	err = 0;

out_unlock:
	mutex_unlock(&ldt->lock);
out:
	return err;
}

static long do_modify_ldt_skas(int func, void __user *ptr,
			       unsigned long bytecount)
{
	int ret = -ENOSYS;

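	/*
	 * func follows modify_ldt(2): 0 = read, 1 = write (legacy format),
	 * 2 = read the default ldt, 0x11 = write (modern format).
	 */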
	switch (func) {
		case 0:
			ret = read_ldt(ptr, bytecount);
			break;
		case 1:
		case 0x11:
			ret = write_ldt(ptr, bytecount, func);
			break;
		case 2:
			ret = read_default_ldt(ptr, bytecount);
			break;
	}
	return ret;
}

static DEFINE_SPINLOCK(host_ldt_lock);
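/*
 * host_ldt_entries lists the numbers of the LDT entries that are in use in
 * the LDT inherited from the host, terminated by -1; init_new_ldt() clears
 * exactly these entries in every new mm.  dummy_list provides the trivial
 * list { 0, -1 } and doubles as storage when only a few host entries are in
 * use.  While ldt_get_host_info() is still scanning, host_ldt_entries points
 * at the -1 terminator, i.e. an empty list.
 */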
static short dummy_list[9] = {0, -1};
static short * host_ldt_entries = NULL;

static void ldt_get_host_info(void)
{
	long ret;
	struct ldt_entry * ldt;
	short *tmp;
	int i, size, k, order;

	spin_lock(&host_ldt_lock);

	if (host_ldt_entries != NULL) {
		spin_unlock(&host_ldt_lock);
		return;
	}
	host_ldt_entries = dummy_list+1;

	spin_unlock(&host_ldt_lock);

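	/*
	 * Find the smallest order such that (1 << order) pages are enough to
	 * hold LDT_PAGES_MAX pages, i.e. a maximum-size host LDT.
	 */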
	for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++)
		;

	ldt = (struct ldt_entry *)
	      __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if (ldt == NULL) {
		printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
		       "for host ldt\n");
		return;
	}

	ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
	if (ret < 0) {
		printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
		goto out_free;
	}
	if (ret == 0) {
		/* default_ldt is active, simply write an empty entry 0 */
		host_ldt_entries = dummy_list;
		goto out_free;
	}

	for (i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			size++;
	}

	if (size < ARRAY_SIZE(dummy_list))
		host_ldt_entries = dummy_list;
	else {
		size = (size + 1) * sizeof(dummy_list[0]);
		tmp = kmalloc(size, GFP_KERNEL);
		if (tmp == NULL) {
			printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
			       "host ldt list\n");
			goto out_free;
		}
		host_ldt_entries = tmp;
	}

	for (i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			host_ldt_entries[k++] = i;
	}
	host_ldt_entries[k] = -1;

out_free:
	free_pages((unsigned long)ldt, order);
}

long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
{
	struct user_desc desc;
	short * num_p;
	int i;
	long page, err=0;
	void *addr = NULL;
	struct proc_mm_op copy;


	if (!ptrace_ldt)
		mutex_init(&new_mm->arch.ldt.lock);

	if (!from_mm) {
		memset(&desc, 0, sizeof(desc));
		/*
		 * We have to initialize a clean ldt.
		 */
		if (proc_mm) {
			/*
			 * If the new mm was created using proc_mm, the host's
			 * default ldt is currently assigned, which normally
			 * contains the call gates for lcall7 and lcall27.
			 * To remove these gates, we simply write an empty
			 * entry as number 0 to the host.
			 */
			err = write_ldt_entry(&new_mm->id, 1, &desc, &addr, 1);
		}
		else {
			/*
			 * Now we try to retrieve info about the ldt we
			 * inherited from the host.  All ldt entries found
			 * will be reset in the following loop.
			 */
			ldt_get_host_info();
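			/*
			 * The last argument flags the final entry in the
			 * list, so that write_ldt_entry() flushes the batched
			 * stub operations on the last iteration.
			 */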
			for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
				desc.entry_number = *num_p;
				err = write_ldt_entry(&new_mm->id, 1, &desc,
						      &addr, *(num_p + 1) == -1);
				if (err)
					break;
			}
		}
		new_mm->arch.ldt.entry_count = 0;

		goto out;
	}

	if (proc_mm) {
		/*
		 * We have a valid from_mm, so we now have to copy the LDT of
		 * from_mm to new_mm, because when proc_mm is used, a new mm
		 * with an empty/default LDT was created in new_mm().
		 */
		copy = ((struct proc_mm_op) { .op	= MM_COPY_SEGMENTS,
					      .u	=
					      { .copy_segments =
							from_mm->id.u.mm_fd } } );
		i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy));
		if (i != sizeof(copy))
			printk(KERN_ERR "new_mm : /proc/mm copy_segments "
			       "failed, err = %d\n", -i);
	}

	if (!ptrace_ldt) {
		/*
		 * Our local LDT is used to supply the data for
		 * modify_ldt(READLDT) if PTRACE_LDT isn't available,
		 * i.e. when we have to use the stub for modify_ldt, which
		 * can't handle the big read buffer of up to 64kB.
		 */
		mutex_lock(&from_mm->arch.ldt.lock);
		if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
			memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
			       sizeof(new_mm->arch.ldt.u.entries));
		else {
			i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
			while (i-->0) {
				page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
				if (!page) {
					err = -ENOMEM;
					break;
				}
				new_mm->arch.ldt.u.pages[i] =
					(struct ldt_entry *) page;
				memcpy(new_mm->arch.ldt.u.pages[i],
				       from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
			}
		}
		new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
		mutex_unlock(&from_mm->arch.ldt.lock);
	}

out:
	return err;
}


void free_ldt(struct mm_context *mm)
{
	int i;

	if (!ptrace_ldt && mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
		i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
		while (i-- > 0)
			free_page((long) mm->arch.ldt.u.pages[i]);
	}
	mm->arch.ldt.entry_count = 0;
}

int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
	return do_modify_ldt_skas(func, ptr, bytecount);
}
503