/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <os.h>
#include <skas.h>
#include <sysdep/tls.h>

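/*
 * glibc provides no wrapper for modify_ldt, so the host call is made
 * directly via syscall(2).
 */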
static inline int modify_ldt(int func, void *ptr, unsigned long bytecount)
{
	return syscall(__NR_modify_ldt, func, ptr, bytecount);
}

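/*
 * Install one LDT entry in the host address space of @mm_idp: the
 * descriptor is copied into the stub data page and modify_ldt is then
 * run there via the syscall stub.
 */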
static long write_ldt_entry(struct mm_id *mm_idp, int func,
			    struct user_desc *desc, void **addr, int done)
{
	long res;
	void *stub_addr;
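
	/*
	 * The size of the copy is rounded up to a whole number of longs
	 * for syscall_stub_data().
	 */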
	res = syscall_stub_data(mm_idp, (unsigned long *)desc,
				(sizeof(*desc) + sizeof(long) - 1) &
				    ~(sizeof(long) - 1),
				addr, &stub_addr);
	if (!res) {
		unsigned long args[] = { func,
					 (unsigned long)stub_addr,
					 sizeof(*desc),
					 0, 0, 0 };
		res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
				       0, addr, done);
	}

	return res;
}

/*
 * In skas mode, we hold our own LDT data in UML.
 * Thus, the code implementing sys_modify_ldt_skas
 * is very similar to (and mostly stolen from) sys_modify_ldt
 * in arch/i386/kernel/ldt.c.
 * The routines copied and modified in part are:
 * - read_ldt
 * - read_default_ldt
 * - write_ldt
 * - sys_modify_ldt_skas
 */

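/*
 * Copy the shadow LDT out to userspace.  Bytes beyond the last
 * allocated entry are zero-filled; on success the (possibly capped)
 * byte count is returned.
 */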
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	int i, err = 0;
	unsigned long size;
	uml_ldt_t *ldt = &current->mm->context.arch.ldt;

	if (!ldt->entry_count)
		goto out;
	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
	err = bytecount;

	mutex_lock(&ldt->lock);
	if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
		size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
		if (size > bytecount)
			size = bytecount;
		if (copy_to_user(ptr, ldt->u.entries, size))
			err = -EFAULT;
		bytecount -= size;
		ptr += size;
	} else {
		for (i = 0; i < ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
		     i++) {
			size = PAGE_SIZE;
			if (size > bytecount)
				size = bytecount;
			if (copy_to_user(ptr, ldt->u.pages[i], size)) {
				err = -EFAULT;
				break;
			}
			bytecount -= size;
			ptr += size;
		}
	}
	mutex_unlock(&ldt->lock);

	if (bytecount == 0 || err == -EFAULT)
		goto out;

	if (clear_user(ptr, bytecount))
		err = -EFAULT;

out:
	return err;
}

static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	int err;

	if (bytecount > 5*LDT_ENTRY_SIZE)
		bytecount = 5*LDT_ENTRY_SIZE;

	err = bytecount;
	/*
	 * UML doesn't support lcall7 and lcall27, so we don't have a
	 * real default LDT; instead, emulate an empty LDT of the common
	 * host default LDT size.
	 */
	if (clear_user(ptr, bytecount))
		err = -EFAULT;

	return err;
}

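/*
 * Install or clear a single LDT entry: validate the user_desc, apply
 * it in the host via the syscall stub, then mirror it into the shadow
 * LDT, growing from the direct array to page-backed storage on demand.
 */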
static int write_ldt(void __user *ptr, unsigned long bytecount, int func)
{
	uml_ldt_t *ldt = &current->mm->context.arch.ldt;
	struct mm_id *mm_idp = &current->mm->context.id;
	int i, err;
	struct user_desc ldt_info;
	struct ldt_entry entry0, *ldt_p;
	void *addr = NULL;

	err = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	err = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	err = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (func == 1)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	mutex_lock(&ldt->lock);

	err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
	if (err)
		goto out_unlock;

	if (ldt_info.entry_number >= ldt->entry_count &&
	    ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
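		/*
		 * Grow into page-backed storage.  u.entries and u.pages
		 * overlay each other in a union, so entry 0 is saved in
		 * entry0 before the first page pointer clobbers it, and
		 * is restored into the new page together with the other
		 * direct entries.
		 */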
		for (i = ldt->entry_count/LDT_ENTRIES_PER_PAGE;
		     i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
		     i++) {
			if (i == 0)
				memcpy(&entry0, ldt->u.entries,
				       sizeof(entry0));
			ldt->u.pages[i] = (struct ldt_entry *)
				__get_free_page(GFP_KERNEL|__GFP_ZERO);
			if (!ldt->u.pages[i]) {
				err = -ENOMEM;
				/* Undo the change in the host */
				memset(&ldt_info, 0, sizeof(ldt_info));
				write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
				goto out_unlock;
			}
			if (i == 0) {
				memcpy(ldt->u.pages[0], &entry0,
				       sizeof(entry0));
				memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
				       sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
			}
			ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
		}
	}
	if (ldt->entry_count <= ldt_info.entry_number)
		ldt->entry_count = ldt_info.entry_number + 1;

	if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
		ldt_p = ldt->u.entries + ldt_info.entry_number;
	else
		ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
			ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;

	if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
	   (func == 1 || LDT_empty(&ldt_info))) {
		ldt_p->a = 0;
		ldt_p->b = 0;
	} else {
		if (func == 1)
			ldt_info.useable = 0;
		ldt_p->a = LDT_entry_a(&ldt_info);
		ldt_p->b = LDT_entry_b(&ldt_info);
	}
	err = 0;

out_unlock:
	mutex_unlock(&ldt->lock);
out:
	return err;
}

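/*
 * Dispatch on the modify_ldt func code: 0 reads the LDT, 1 and 0x11
 * write an entry (func 1 is the legacy variant that clears the
 * 'useable' bit), and 2 reads the default LDT.  A typical userspace
 * call, for illustration only:
 *
 *	struct user_desc d = { .entry_number = 0, .base_addr = 0,
 *			       .limit = 0, .seg_not_present = 1 };
 *	syscall(SYS_modify_ldt, 0x11, &d, sizeof(d));
 */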
static long do_modify_ldt_skas(int func, void __user *ptr,
			       unsigned long bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
	case 0x11:
		ret = write_ldt(ptr, bytecount, func);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	}
	return ret;
}

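/*
 * host_ldt_entries is a -1-terminated list of the host LDT slots that
 * are in use.  dummy_list doubles as two ready-made lists: dummy_list
 * itself (slot 0, then the terminator) and dummy_list + 1 (empty).
 */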
static DEFINE_SPINLOCK(host_ldt_lock);
static short dummy_list[9] = {0, -1};
static short *host_ldt_entries = NULL;

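/*
 * Read the host's LDT once and record which slots it populates, so
 * that init_new_ldt() can clear them in new address spaces.
 */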
static void ldt_get_host_info(void)
{
	long ret;
	struct ldt_entry *ldt;
	short *tmp;
	int i, size, k, order;

	spin_lock(&host_ldt_lock);

	if (host_ldt_entries != NULL) {
		spin_unlock(&host_ldt_lock);
		return;
	}
	host_ldt_entries = dummy_list + 1;

	spin_unlock(&host_ldt_lock);

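	/* Find the smallest order with (1 << order) pages >= LDT_PAGES_MAX. */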
	for (i = LDT_PAGES_MAX-1, order = 0; i; i >>= 1, order++)
		;

	ldt = (struct ldt_entry *)
	      __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if (ldt == NULL) {
		printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
		       "for host ldt\n");
		return;
	}

	ret = modify_ldt(0, ldt, (1 << order)*PAGE_SIZE);
	if (ret < 0) {
		printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
		goto out_free;
	}
	if (ret == 0) {
		/* default_ldt is active, simply write an empty entry 0 */
		host_ldt_entries = dummy_list;
		goto out_free;
	}

	for (i = 0, size = 0; i < ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			size++;
	}

	if (size < ARRAY_SIZE(dummy_list)) {
		host_ldt_entries = dummy_list;
	} else {
		size = (size + 1) * sizeof(dummy_list[0]);
		tmp = kmalloc(size, GFP_KERNEL);
		if (tmp == NULL) {
			printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
			       "host ldt list\n");
			goto out_free;
		}
		host_ldt_entries = tmp;
	}

	for (i = 0, k = 0; i < ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			host_ldt_entries[k++] = i;
	}
	host_ldt_entries[k] = -1;

out_free:
	free_pages((unsigned long)ldt, order);
}

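/*
 * Set up the LDT bookkeeping for a new mm_context: with no parent
 * context, clear the slots inherited from the host; otherwise copy the
 * parent's shadow LDT.
 */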
long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
{
	struct user_desc desc;
	short *num_p;
	int i;
	long page, err = 0;
	void *addr = NULL;

	mutex_init(&new_mm->arch.ldt.lock);

	if (!from_mm) {
		memset(&desc, 0, sizeof(desc));
		/*
		 * Now we try to retrieve info about the LDT we inherited
		 * from the host.  All LDT entries found will be reset in
		 * the loop below.
		 */
		ldt_get_host_info();
		for (num_p = host_ldt_entries; *num_p != -1; num_p++) {
			desc.entry_number = *num_p;
			err = write_ldt_entry(&new_mm->id, 1, &desc,
					      &addr, *(num_p + 1) == -1);
			if (err)
				break;
		}
		new_mm->arch.ldt.entry_count = 0;

		goto out;
	}

	/*
	 * Our local LDT is used to supply the data for
	 * modify_ldt(READLDT) if PTRACE_LDT isn't available, i.e. when
	 * we have to use the stub for modify_ldt, which can't handle
	 * the big read buffer of up to 64kB.
	 */
	mutex_lock(&from_mm->arch.ldt.lock);
	if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES) {
		memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
		       sizeof(new_mm->arch.ldt.u.entries));
	} else {
		i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
		while (i-- > 0) {
			page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
			if (!page) {
				err = -ENOMEM;
				break;
			}
			new_mm->arch.ldt.u.pages[i] =
				(struct ldt_entry *) page;
			memcpy(new_mm->arch.ldt.u.pages[i],
			       from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
		}
	}
	new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
	mutex_unlock(&from_mm->arch.ldt.lock);

out:
	return err;
}

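/* Release any page-backed shadow LDT storage held by @mm. */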
void free_ldt(struct mm_context *mm)
{
	int i;

	if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
		i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
		while (i-- > 0)
			free_page((long) mm->arch.ldt.u.pages[i]);
	}
	mm->arch.ldt.entry_count = 0;
}

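/*
 * The syscall entry point; func, ptr and bytecount follow the host
 * modify_ldt(2) interface.
 */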
int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
	return do_modify_ldt_skas(func, ptr, bytecount);
}