/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <os.h>
#include <skas.h>
#include <sysdep/tls.h>

static inline int modify_ldt(int func, void *ptr, unsigned long bytecount)
{
        return syscall(__NR_modify_ldt, func, ptr, bytecount);
}

static long write_ldt_entry(struct mm_id *mm_idp, int func,
                            struct user_desc *desc, void **addr, int done)
{
        long res;
        void *stub_addr;

        /* Copy the descriptor into the stub's data area, rounded up to a
         * whole number of longs. */
        res = syscall_stub_data(mm_idp, (unsigned long *)desc,
                                (sizeof(*desc) + sizeof(long) - 1) &
                                        ~(sizeof(long) - 1),
                                addr, &stub_addr);
        if (!res) {
                unsigned long args[] = { func,
                                         (unsigned long)stub_addr,
                                         sizeof(*desc),
                                         0, 0, 0 };
                res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
                                       0, addr, done);
        }

        return res;
}

/*
 * In skas mode, we hold our own ldt data in UML.
 * Thus, the code implementing sys_modify_ldt_skas
 * is very similar to (and mostly stolen from) sys_modify_ldt
 * in arch/i386/kernel/ldt.c.
 * The routines copied and modified in part are:
 * - read_ldt
 * - read_default_ldt
 * - write_ldt
 * - sys_modify_ldt_skas
 */

static int read_ldt(void __user *ptr, unsigned long bytecount)
{
        int i, err = 0;
        unsigned long size;
        uml_ldt_t *ldt = &current->mm->context.arch.ldt;

        if (!ldt->entry_count)
                goto out;
        if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
                bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
        err = bytecount;

        mutex_lock(&ldt->lock);
        if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
                size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
                if (size > bytecount)
                        size = bytecount;
                if (copy_to_user(ptr, ldt->u.entries, size))
                        err = -EFAULT;
                bytecount -= size;
                ptr += size;
        }
        else {
                for (i = 0; i < ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
                     i++) {
                        size = PAGE_SIZE;
                        if (size > bytecount)
                                size = bytecount;
                        if (copy_to_user(ptr, ldt->u.pages[i], size)) {
                                err = -EFAULT;
                                break;
                        }
                        bytecount -= size;
                        ptr += size;
                }
        }
        mutex_unlock(&ldt->lock);

        if (bytecount == 0 || err == -EFAULT)
                goto out;

        /* Any tail the caller asked for beyond the stored entries reads
         * back as zeroed descriptors. */
        if (clear_user(ptr, bytecount))
                err = -EFAULT;

out:
        return err;
}
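/*
 * A note on the storage scheme read_ldt() above walks: uml_ldt_t keeps
 * small LDTs (up to LDT_DIRECT_ENTRIES descriptors) inline in u.entries;
 * anything larger switches to whole pages in u.pages[], each holding
 * LDT_ENTRIES_PER_PAGE (PAGE_SIZE / LDT_ENTRY_SIZE) descriptors. An
 * illustrative lookup of entry n in the paged case, mirroring the
 * arithmetic in write_ldt() below ("n" here is hypothetical):
 *
 *      struct ldt_entry *e = ldt->u.pages[n / LDT_ENTRIES_PER_PAGE]
 *                              + n % LDT_ENTRIES_PER_PAGE;
 *
 * With 4 KiB pages and 8-byte descriptors that is 512 entries per page,
 * so entry 600 lands in slot 88 of page 1.
 */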
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
        int err;

        if (bytecount > 5*LDT_ENTRY_SIZE)
                bytecount = 5*LDT_ENTRY_SIZE;

        err = bytecount;
        /*
         * UML doesn't support lcall7 and lcall27.
         * So, we don't really have a default ldt, but emulate
         * an empty ldt of common host default ldt size.
         */
        if (clear_user(ptr, bytecount))
                err = -EFAULT;

        return err;
}

static int write_ldt(void __user *ptr, unsigned long bytecount, int func)
{
        uml_ldt_t *ldt = &current->mm->context.arch.ldt;
        struct mm_id *mm_idp = &current->mm->context.id;
        int i, err;
        struct user_desc ldt_info;
        struct ldt_entry entry0, *ldt_p;
        void *addr = NULL;

        err = -EINVAL;
        if (bytecount != sizeof(ldt_info))
                goto out;
        err = -EFAULT;
        if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
                goto out;

        err = -EINVAL;
        if (ldt_info.entry_number >= LDT_ENTRIES)
                goto out;
        if (ldt_info.contents == 3) {
                if (func == 1)
                        goto out;
                if (ldt_info.seg_not_present == 0)
                        goto out;
        }

        mutex_lock(&ldt->lock);

        /* Install the entry in the host LDT first ... */
        err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
        if (err)
                goto out_unlock;

        /* ... then mirror it in our local copy, growing the paged
         * storage if the entry lies beyond the direct entries. */
        if (ldt_info.entry_number >= ldt->entry_count &&
            ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
                for (i = ldt->entry_count/LDT_ENTRIES_PER_PAGE;
                     i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
                     i++) {
                        /* Writing the u.pages[0] pointer below clobbers
                         * the first inline entry, so save it first. */
                        if (i == 0)
                                memcpy(&entry0, ldt->u.entries,
                                       sizeof(entry0));
                        ldt->u.pages[i] = (struct ldt_entry *)
                                __get_free_page(GFP_KERNEL|__GFP_ZERO);
                        if (!ldt->u.pages[i]) {
                                err = -ENOMEM;
                                /* Undo the change in host */
                                memset(&ldt_info, 0, sizeof(ldt_info));
                                write_ldt_entry(mm_idp, 1, &ldt_info,
                                                &addr, 1);
                                goto out_unlock;
                        }
                        if (i == 0) {
                                memcpy(ldt->u.pages[0], &entry0,
                                       sizeof(entry0));
                                memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
                                       sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
                        }
                        ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
                }
        }
        if (ldt->entry_count <= ldt_info.entry_number)
                ldt->entry_count = ldt_info.entry_number + 1;

        if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
                ldt_p = ldt->u.entries + ldt_info.entry_number;
        else
                ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
                        ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;

        if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
            (func == 1 || LDT_empty(&ldt_info))) {
                ldt_p->a = 0;
                ldt_p->b = 0;
        }
        else {
                if (func == 1)
                        ldt_info.useable = 0;
                ldt_p->a = LDT_entry_a(&ldt_info);
                ldt_p->b = LDT_entry_b(&ldt_info);
        }
        err = 0;

out_unlock:
        mutex_unlock(&ldt->lock);
out:
        return err;
}

static long do_modify_ldt_skas(int func, void __user *ptr,
                               unsigned long bytecount)
{
        int ret = -ENOSYS;

        switch (func) {
        case 0:
                ret = read_ldt(ptr, bytecount);
                break;
        case 1:
        case 0x11:
                ret = write_ldt(ptr, bytecount, func);
                break;
        case 2:
                ret = read_default_ldt(ptr, bytecount);
                break;
        }
        return ret;
}
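/*
 * For illustration, a minimal userspace sketch exercising the func values
 * dispatched by do_modify_ldt_skas() above. modify_ldt() has no glibc
 * wrapper, so callers go through syscall(2); the entry number, base, and
 * limit below are hypothetical. Not part of the kernel build:
 *
 *      #include <asm/ldt.h>            // struct user_desc, LDT_ENTRY_SIZE
 *      #include <stdio.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              struct user_desc d = {
 *                      .entry_number   = 0,            // slot to install
 *                      .base_addr      = 0,            // hypothetical flat base
 *                      .limit          = 0xfffff,
 *                      .seg_32bit      = 1,
 *                      .limit_in_pages = 1,
 *                      .useable        = 1,
 *              };
 *              unsigned char buf[LDT_ENTRY_SIZE];
 *
 *              // func 0x11: "new mode" write, handled by write_ldt()
 *              if (syscall(SYS_modify_ldt, 0x11, &d, sizeof(d)) != 0)
 *                      perror("modify_ldt(write)");
 *
 *              // func 0: read back; returns the number of bytes copied
 *              long n = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
 *              printf("read %ld bytes of LDT\n", n);
 *              return 0;
 *      }
 */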
static DEFINE_SPINLOCK(host_ldt_lock);
static short dummy_list[9] = { 0, -1 };
static short *host_ldt_entries = NULL;

static void ldt_get_host_info(void)
{
        long ret;
        struct ldt_entry *ldt;
        short *tmp;
        int i, size, k, order;

        spin_lock(&host_ldt_lock);

        if (host_ldt_entries != NULL) {
                spin_unlock(&host_ldt_lock);
                return;
        }
        host_ldt_entries = dummy_list + 1;

        spin_unlock(&host_ldt_lock);

        /* Smallest order such that (1 << order) pages cover LDT_PAGES_MAX */
        for (i = LDT_PAGES_MAX-1, order = 0; i; i >>= 1, order++)
                ;

        ldt = (struct ldt_entry *)
                __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
        if (ldt == NULL) {
                printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
                       "for host ldt\n");
                return;
        }

        ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
        if (ret < 0) {
                printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
                goto out_free;
        }
        if (ret == 0) {
                /* default_ldt is active, simply write an empty entry 0 */
                host_ldt_entries = dummy_list;
                goto out_free;
        }

        for (i = 0, size = 0; i < ret/LDT_ENTRY_SIZE; i++) {
                if (ldt[i].a != 0 || ldt[i].b != 0)
                        size++;
        }

        if (size < ARRAY_SIZE(dummy_list))
                host_ldt_entries = dummy_list;
        else {
                size = (size + 1) * sizeof(dummy_list[0]);
                tmp = kmalloc(size, GFP_KERNEL);
                if (tmp == NULL) {
                        printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
                               "host ldt list\n");
                        goto out_free;
                }
                host_ldt_entries = tmp;
        }

        for (i = 0, k = 0; i < ret/LDT_ENTRY_SIZE; i++) {
                if (ldt[i].a != 0 || ldt[i].b != 0)
                        host_ldt_entries[k++] = i;
        }
        host_ldt_entries[k] = -1;

out_free:
        free_pages((unsigned long)ldt, order);
}

long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
{
        struct user_desc desc;
        short *num_p;
        int i;
        long page, err = 0;
        void *addr = NULL;

        mutex_init(&new_mm->arch.ldt.lock);

        if (!from_mm) {
                memset(&desc, 0, sizeof(desc));
                /*
                 * Now we try to retrieve info about the ldt we
                 * inherited from the host. All LDT entries found
                 * will be reset in the following loop.
                 */
                ldt_get_host_info();
                for (num_p = host_ldt_entries; *num_p != -1; num_p++) {
                        desc.entry_number = *num_p;
                        err = write_ldt_entry(&new_mm->id, 1, &desc,
                                              &addr, *(num_p + 1) == -1);
                        if (err)
                                break;
                }
                new_mm->arch.ldt.entry_count = 0;

                goto out;
        }

        /*
         * Our local LDT is used to supply the data for
         * modify_ldt(READLDT), if PTRACE_LDT isn't available,
         * i.e. we have to use the stub for modify_ldt, which
         * can't handle the big read buffer of up to 64kB.
         */
        mutex_lock(&from_mm->arch.ldt.lock);
        if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
                memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
                       sizeof(new_mm->arch.ldt.u.entries));
        else {
                i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
                while (i-- > 0) {
                        page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
                        if (!page) {
                                err = -ENOMEM;
                                break;
                        }
                        new_mm->arch.ldt.u.pages[i] =
                                (struct ldt_entry *) page;
                        memcpy(new_mm->arch.ldt.u.pages[i],
                               from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
                }
        }
        new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
        mutex_unlock(&from_mm->arch.ldt.lock);

out:
        return err;
}

void free_ldt(struct mm_context *mm)
{
        int i;

        if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
                i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
                while (i-- > 0)
                        free_page((long) mm->arch.ldt.u.pages[i]);
        }
        mm->arch.ldt.entry_count = 0;
}

SYSCALL_DEFINE3(modify_ldt, int, func, void __user *, ptr,
                unsigned long, bytecount)
{
        /* See non-um modify_ldt() for why we do this cast */
        return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount);
}
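/*
 * Worked example for the cast above (my reading of the rationale the
 * non-UML modify_ldt() documents): SYSCALL_DEFINE3() makes the function
 * return a long, but the historical modify_ldt() ABI returns an int.
 * Returning -EINVAL (-22) directly would sign-extend to
 * 0xffffffffffffffea in %rax on 64-bit; the (unsigned int) cast
 * truncates first, so %rax holds 0x00000000ffffffea, and callers that
 * inspect only the low 32 bits still see -22.
 */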