xref: /openbmc/linux/arch/x86/um/ldt.c (revision c0e297dc)
/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <os.h>
#include <skas.h>
#include <sysdep/tls.h>

extern int modify_ldt(int func, void *ptr, unsigned long bytecount);

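/*
 * Copy *desc into the stub data area of the target address space and
 * run the syscall stub there, so that the target process itself calls
 * modify_ldt() on the entry.  "done" marks the last operation of a
 * batch for the stub machinery.
 */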
static long write_ldt_entry(struct mm_id *mm_idp, int func,
			    struct user_desc *desc, void **addr, int done)
{
	long res;
	void *stub_addr;

	res = syscall_stub_data(mm_idp, (unsigned long *)desc,
				(sizeof(*desc) + sizeof(long) - 1) &
					~(sizeof(long) - 1),
				addr, &stub_addr);
	if (!res) {
		unsigned long args[] = { func,
					 (unsigned long)stub_addr,
					 sizeof(*desc),
					 0, 0, 0 };
		res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
				       0, addr, done);
	}

	return res;
}

/*
 * In skas mode, we hold our own ldt data in UML.
 * Thus, the code implementing sys_modify_ldt_skas
 * is very similar to (and mostly stolen from) sys_modify_ldt
 * in arch/i386/kernel/ldt.c.
 * The routines copied and modified in part are:
 * - read_ldt
 * - read_default_ldt
 * - write_ldt
 * - sys_modify_ldt_skas
 */
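
/*
 * For orientation, here is a minimal userspace sketch of the interface
 * implemented below.  It is a hypothetical test program, not part of
 * this file; it assumes the exported struct user_desc from <asm/ldt.h>
 * and glibc's generic syscall() wrapper:
 *
 *	#include <asm/ldt.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// Install a 32-bit data segment in LDT slot "slot" (the helper
 *	// name is ours, purely illustrative).
 *	static int install_ldt_slot(unsigned int slot, void *base,
 *				    unsigned int limit)
 *	{
 *		struct user_desc d;
 *
 *		memset(&d, 0, sizeof(d));
 *		d.entry_number = slot;
 *		d.base_addr = (unsigned long)base;
 *		d.limit = limit;
 *		d.seg_32bit = 1;
 *		d.limit_in_pages = 1;
 *		// func 0x11 honors d.useable; func 1 is the legacy
 *		// write, and func 0 reads the table back.
 *		return syscall(SYS_modify_ldt, 0x11, &d, sizeof(d));
 *	}
 */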
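
/*
 * Copy the guest's LDT out to userspace.  Small tables live in the
 * direct array ldt->u.entries; bigger ones are kept in the page array
 * ldt->u.pages[].  Whatever part of the user buffer lies beyond the
 * last allocated entry is zero-filled.
 */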
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	int i, err = 0;
	unsigned long size;
	uml_ldt_t *ldt = &current->mm->context.arch.ldt;

	if (!ldt->entry_count)
		goto out;
	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
	err = bytecount;

	mutex_lock(&ldt->lock);
	if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
		size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
		if (size > bytecount)
			size = bytecount;
		if (copy_to_user(ptr, ldt->u.entries, size))
			err = -EFAULT;
		bytecount -= size;
		ptr += size;
	} else {
		for (i = 0; i < ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
		     i++) {
			size = PAGE_SIZE;
			if (size > bytecount)
				size = bytecount;
			if (copy_to_user(ptr, ldt->u.pages[i], size)) {
				err = -EFAULT;
				break;
			}
			bytecount -= size;
			ptr += size;
		}
	}
	mutex_unlock(&ldt->lock);

	if (bytecount == 0 || err == -EFAULT)
		goto out;

	if (clear_user(ptr, bytecount))
		err = -EFAULT;

out:
	return err;
}

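/*
 * modify_ldt(2): since UML never has lcall7/lcall27 handlers, there is
 * no real default ldt; report an empty table of the usual host size.
 */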
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	int err;

	if (bytecount > 5*LDT_ENTRY_SIZE)
		bytecount = 5*LDT_ENTRY_SIZE;

	err = bytecount;
	/*
	 * UML doesn't support lcall7 and lcall27.
	 * So we don't really have a default ldt, but emulate
	 * an empty ldt of the common host default ldt size.
	 */
	if (clear_user(ptr, bytecount))
		err = -EFAULT;

	return err;
}

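/*
 * modify_ldt(1)/modify_ldt(0x11): validate the descriptor, write it
 * through to the host process via write_ldt_entry(), then update UML's
 * own copy, switching from the direct array to page-sized chunks once
 * the table grows past LDT_DIRECT_ENTRIES.
 */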
static int write_ldt(void __user *ptr, unsigned long bytecount, int func)
{
	uml_ldt_t *ldt = &current->mm->context.arch.ldt;
	struct mm_id *mm_idp = &current->mm->context.id;
	int i, err;
	struct user_desc ldt_info;
	struct ldt_entry entry0, *ldt_p;
	void *addr = NULL;

	err = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	err = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	err = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (func == 1)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	mutex_lock(&ldt->lock);

	err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
	if (err)
		goto out_unlock;

	if (ldt_info.entry_number >= ldt->entry_count &&
	    ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
		for (i = ldt->entry_count/LDT_ENTRIES_PER_PAGE;
		     i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
		     i++) {
			if (i == 0)
				memcpy(&entry0, ldt->u.entries,
				       sizeof(entry0));
			ldt->u.pages[i] = (struct ldt_entry *)
				__get_free_page(GFP_KERNEL|__GFP_ZERO);
			if (!ldt->u.pages[i]) {
				err = -ENOMEM;
				/* Undo the change in the host */
				memset(&ldt_info, 0, sizeof(ldt_info));
				write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
				goto out_unlock;
			}
			if (i == 0) {
				memcpy(ldt->u.pages[0], &entry0,
				       sizeof(entry0));
				memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
				       sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
			}
			ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
		}
	}
	if (ldt->entry_count <= ldt_info.entry_number)
		ldt->entry_count = ldt_info.entry_number + 1;

	if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
		ldt_p = ldt->u.entries + ldt_info.entry_number;
	else
		ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
			ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;

	if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
	    (func == 1 || LDT_empty(&ldt_info))) {
		ldt_p->a = 0;
		ldt_p->b = 0;
	} else {
		if (func == 1)
			ldt_info.useable = 0;
		ldt_p->a = LDT_entry_a(&ldt_info);
		ldt_p->b = LDT_entry_b(&ldt_info);
	}
	err = 0;

out_unlock:
	mutex_unlock(&ldt->lock);
out:
	return err;
}

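/*
 * Dispatch on the modify_ldt() function code:
 *   0    - read the current ldt
 *   1    - write an entry (legacy semantics, clears "useable")
 *   0x11 - write an entry (honors "useable")
 *   2    - read the default ldt
 */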
static long do_modify_ldt_skas(int func, void __user *ptr,
			       unsigned long bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
	case 0x11:
		ret = write_ldt(ptr, bytecount, func);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	}
	return ret;
}

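/*
 * host_ldt_entries points to a -1 terminated list of the host LDT
 * slots that are in use.  dummy_list doubles as static storage for
 * short lists (at most eight slots), so kmalloc() is needed only for
 * longer ones.
 */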
static DEFINE_SPINLOCK(host_ldt_lock);
static short dummy_list[9] = {0, -1};
static short *host_ldt_entries = NULL;

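/*
 * Read the host's LDT once and cache the numbers of the slots it has
 * in use, so that init_new_ldt() can reset exactly those entries in
 * new address spaces.
 */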
static void ldt_get_host_info(void)
{
	long ret;
	struct ldt_entry *ldt;
	short *tmp;
	int i, size, k, order;

	spin_lock(&host_ldt_lock);

	if (host_ldt_entries != NULL) {
		spin_unlock(&host_ldt_lock);
		return;
	}
	host_ldt_entries = dummy_list+1;

	spin_unlock(&host_ldt_lock);

	for (i = LDT_PAGES_MAX-1, order = 0; i; i >>= 1, order++)
		;

	ldt = (struct ldt_entry *)
	      __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if (ldt == NULL) {
		printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
		       "for host ldt\n");
		return;
	}

	ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
	if (ret < 0) {
		printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
		goto out_free;
	}
	if (ret == 0) {
		/* default_ldt is active, simply write an empty entry 0 */
		host_ldt_entries = dummy_list;
		goto out_free;
	}

	for (i = 0, size = 0; i < ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			size++;
	}

	if (size < ARRAY_SIZE(dummy_list))
		host_ldt_entries = dummy_list;
	else {
		size = (size + 1) * sizeof(dummy_list[0]);
		tmp = kmalloc(size, GFP_KERNEL);
		if (tmp == NULL) {
			printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
			       "host ldt list\n");
			goto out_free;
		}
		host_ldt_entries = tmp;
	}

	for (i = 0, k = 0; i < ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			host_ldt_entries[k++] = i;
	}
	host_ldt_entries[k] = -1;

out_free:
	free_pages((unsigned long)ldt, order);
}

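/*
 * Set up the LDT state of a new mm.  With no parent mm, clear the
 * entries inherited from the host; otherwise duplicate the parent's
 * in-UML copy of the table, which backs modify_ldt(0) reads (see the
 * comment below about the stub's read-buffer limit).
 */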
long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
{
	struct user_desc desc;
	short *num_p;
	int i;
	long page, err = 0;
	void *addr = NULL;

	mutex_init(&new_mm->arch.ldt.lock);

	if (!from_mm) {
		memset(&desc, 0, sizeof(desc));
		/*
		 * Now we try to retrieve info about the ldt we
		 * inherited from the host. All ldt entries found
		 * will be reset in the following loop.
		 */
		ldt_get_host_info();
		for (num_p = host_ldt_entries; *num_p != -1; num_p++) {
			desc.entry_number = *num_p;
			err = write_ldt_entry(&new_mm->id, 1, &desc,
					      &addr, *(num_p + 1) == -1);
			if (err)
				break;
		}
		new_mm->arch.ldt.entry_count = 0;

		goto out;
	}

	/*
	 * Our local LDT is used to supply the data for
	 * modify_ldt(READLDT), if PTRACE_LDT isn't available,
	 * i.e., we have to use the stub for modify_ldt, which
	 * can't handle the big read buffer of up to 64kB.
	 */
	mutex_lock(&from_mm->arch.ldt.lock);
	if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
		memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
		       sizeof(new_mm->arch.ldt.u.entries));
	else {
		i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
		while (i-- > 0) {
			page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
			if (!page) {
				err = -ENOMEM;
				break;
			}
			new_mm->arch.ldt.u.pages[i] =
				(struct ldt_entry *)page;
			memcpy(new_mm->arch.ldt.u.pages[i],
			       from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
		}
	}
	new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
	mutex_unlock(&from_mm->arch.ldt.lock);

out:
	return err;
}

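/*
 * Free the page-sized chunks of a large LDT copy; the direct entry
 * array is embedded in the mm_context and needs no freeing.
 */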
void free_ldt(struct mm_context *mm)
{
	int i;

	if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
		i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
		while (i-- > 0)
			free_page((long)mm->arch.ldt.u.pages[i]);
	}
	mm->arch.ldt.entry_count = 0;
}

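/*
 * Entry point for the modify_ldt() system call; skas is the only mode
 * UML still supports, so simply forward.
 */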
int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
	return do_modify_ldt_skas(func, ptr, bytecount);
}