/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>

/* context.lock is held for us, so we don't need any locking. */
static void flush_ldt(void *__mm)
{
        struct mm_struct *mm = __mm;
        mm_context_t *pc;

        if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
                return;

        pc = &mm->context;
        set_ldt(pc->ldt->entries, pc->ldt->nr_entries);
}

/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
        struct ldt_struct *new_ldt;
        unsigned int alloc_size;

        if (num_entries > LDT_ENTRIES)
                return NULL;

        new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
        if (!new_ldt)
                return NULL;

        BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
        alloc_size = num_entries * LDT_ENTRY_SIZE;

        /*
         * Xen is very picky: it requires a page-aligned LDT that has no
         * trailing nonzero bytes in any page that contains LDT descriptors.
         * Keep it simple: zero the whole allocation and never allocate less
         * than PAGE_SIZE.
         */
        if (alloc_size > PAGE_SIZE)
                new_ldt->entries = vzalloc(alloc_size);
        else
                new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

        if (!new_ldt->entries) {
                kfree(new_ldt);
                return NULL;
        }

        new_ldt->nr_entries = num_entries;
        return new_ldt;
}

/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
        paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}

/* context.lock is held */
static void install_ldt(struct mm_struct *current_mm,
                        struct ldt_struct *ldt)
{
        /* Synchronizes with lockless_dereference in load_mm_ldt. */
        smp_store_release(&current_mm->context.ldt, ldt);

        /* Activate the LDT for all CPUs using current_mm. */
        on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
}

static void free_ldt_struct(struct ldt_struct *ldt)
{
        if (likely(!ldt))
                return;

        paravirt_free_ldt(ldt->entries, ldt->nr_entries);
        if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
                vfree_atomic(ldt->entries);
        else
                free_page((unsigned long)ldt->entries);
        kfree(ldt);
}

/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
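/*
 * Give a new mm its own copy of current's LDT (if any), so that a later
 * modify_ldt() against one mm cannot affect the other.
 */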
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
{
        struct ldt_struct *new_ldt;
        struct mm_struct *old_mm;
        int retval = 0;

        mutex_init(&mm->context.lock);
        old_mm = current->mm;
        if (!old_mm) {
                mm->context.ldt = NULL;
                return 0;
        }

        mutex_lock(&old_mm->context.lock);
        if (!old_mm->context.ldt) {
                mm->context.ldt = NULL;
                goto out_unlock;
        }

        new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
        if (!new_ldt) {
                retval = -ENOMEM;
                goto out_unlock;
        }

        memcpy(new_ldt->entries, old_mm->context.ldt->entries,
               new_ldt->nr_entries * LDT_ENTRY_SIZE);
        finalize_ldt_struct(new_ldt);

        mm->context.ldt = new_ldt;

out_unlock:
        mutex_unlock(&old_mm->context.lock);
        return retval;
}

/*
 * No need to lock the MM as we are the last user
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
        free_ldt_struct(mm->context.ldt);
        mm->context.ldt = NULL;
}

static int read_ldt(void __user *ptr, unsigned long bytecount)
{
        struct mm_struct *mm = current->mm;
        unsigned long entries_size;
        int retval;

        mutex_lock(&mm->context.lock);

        if (!mm->context.ldt) {
                retval = 0;
                goto out_unlock;
        }

        if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
                bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

        entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
        if (entries_size > bytecount)
                entries_size = bytecount;

        if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
                retval = -EFAULT;
                goto out_unlock;
        }

        if (entries_size != bytecount) {
                /* Zero-fill the rest and pretend we read bytecount bytes. */
                if (clear_user(ptr + entries_size, bytecount - entries_size)) {
                        retval = -EFAULT;
                        goto out_unlock;
                }
        }
        retval = bytecount;

out_unlock:
        mutex_unlock(&mm->context.lock);
        return retval;
}

static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
        /* CHECKME: Can we use _one_ random number ? */
#ifdef CONFIG_X86_32
        unsigned long size = 5 * sizeof(struct desc_struct);
#else
        unsigned long size = 128;
#endif
        if (bytecount > size)
                bytecount = size;
        if (clear_user(ptr, bytecount))
                return -EFAULT;
        return bytecount;
}

static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
        struct mm_struct *mm = current->mm;
        struct ldt_struct *new_ldt, *old_ldt;
        unsigned int old_nr_entries, new_nr_entries;
        struct user_desc ldt_info;
        struct desc_struct ldt;
        int error;

        error = -EINVAL;
        if (bytecount != sizeof(ldt_info))
                goto out;
        error = -EFAULT;
        if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
                goto out;

        error = -EINVAL;
        if (ldt_info.entry_number >= LDT_ENTRIES)
                goto out;
        if (ldt_info.contents == 3) {
                if (oldmode)
                        goto out;
                if (ldt_info.seg_not_present == 0)
                        goto out;
        }

        if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
            LDT_empty(&ldt_info)) {
                /* The user wants to clear the entry. */
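                /*
                 * An all-zero descriptor has the present bit clear, so any
                 * later attempt to load a selector referencing this slot
                 * faults; this is how an LDT entry is effectively freed.
                 */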
                memset(&ldt, 0, sizeof(ldt));
        } else {
                if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
                        error = -EINVAL;
                        goto out;
                }

                fill_ldt(&ldt, &ldt_info);
                if (oldmode)
                        ldt.avl = 0;
        }

        mutex_lock(&mm->context.lock);

        old_ldt = mm->context.ldt;
        old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
        new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

        error = -ENOMEM;
        new_ldt = alloc_ldt_struct(new_nr_entries);
        if (!new_ldt)
                goto out_unlock;

        if (old_ldt)
                memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

        new_ldt->entries[ldt_info.entry_number] = ldt;
        finalize_ldt_struct(new_ldt);

        install_ldt(mm, new_ldt);
        free_ldt_struct(old_ldt);
        error = 0;

out_unlock:
        mutex_unlock(&mm->context.lock);
out:
        return error;
}

asmlinkage int sys_modify_ldt(int func, void __user *ptr,
                              unsigned long bytecount)
{
        int ret = -ENOSYS;

        switch (func) {
        case 0:
                ret = read_ldt(ptr, bytecount);
                break;
        case 1:
                ret = write_ldt(ptr, bytecount, 1);
                break;
        case 2:
                ret = read_default_ldt(ptr, bytecount);
                break;
        case 0x11:
                ret = write_ldt(ptr, bytecount, 0);
                break;
        }
        return ret;
}
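/*
 * Example usage (userspace, not part of this file): a minimal sketch of
 * driving the interface above via modify_ldt(2). modify_ldt has no glibc
 * wrapper, so it is invoked through syscall(2); func 0x11 writes one entry
 * in the new (struct user_desc) format, func 0 reads the table back (the
 * kernel zero-fills past the last allocated entry). Error handling is
 * kept minimal.
 *
 *	#include <asm/ldt.h>		// struct user_desc, LDT_ENTRIES, ...
 *	#include <sys/syscall.h>
 *	#include <string.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct user_desc desc;
 *		static unsigned char buf[LDT_ENTRIES * LDT_ENTRY_SIZE];
 *		long ret;
 *
 *		memset(&desc, 0, sizeof(desc));
 *		desc.entry_number   = 0;
 *		desc.base_addr      = 0;
 *		desc.limit          = 0xfffff;	// 4 GiB with limit_in_pages
 *		desc.seg_32bit      = 1;
 *		desc.limit_in_pages = 1;
 *		desc.contents       = MODIFY_LDT_CONTENTS_DATA;
 *
 *		ret = syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
 *		if (ret)
 *			perror("modify_ldt(write)");
 *
 *		ret = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
 *		printf("read_ldt returned %ld bytes\n", ret);
 *		return 0;
 *	}
 */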