// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 *
 * Lock order:
 *	context.ldt_usr_sem
 *	  mmap_sem
 *	    context.lock
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/tlb.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>

static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
	unsigned short sel;

	/*
	 * Make sure that the cached DS and ES descriptors match the updated
	 * LDT.
	 */
	savesegment(ds, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(ds, sel);

	savesegment(es, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(es, sel);
#endif
}

/* context.lock is held by the task which issued the smp function call */
static void flush_ldt(void *__mm)
{
	struct mm_struct *mm = __mm;

	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
		return;

	load_mm_ldt(mm);

	refresh_ldt_segments();
}

/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
	struct ldt_struct *new_ldt;
	unsigned int alloc_size;

	if (num_entries > LDT_ENTRIES)
		return NULL;

	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
	if (!new_ldt)
		return NULL;

	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
	alloc_size = num_entries * LDT_ENTRY_SIZE;

	/*
	 * Xen is very picky: it requires a page-aligned LDT that has no
	 * trailing nonzero bytes in any page that contains LDT descriptors.
	 * Keep it simple: zero the whole allocation and never allocate less
	 * than PAGE_SIZE.
	 */
	if (alloc_size > PAGE_SIZE)
		new_ldt->entries = vzalloc(alloc_size);
	else
		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

	if (!new_ldt->entries) {
		kfree(new_ldt);
		return NULL;
	}

	/* The new LDT isn't aliased for PTI yet. */
	new_ldt->slot = -1;

	new_ldt->nr_entries = num_entries;
	return new_ldt;
}

#ifdef CONFIG_PAGE_TABLE_ISOLATION

static void do_sanity_check(struct mm_struct *mm,
			    bool had_kernel_mapping,
			    bool had_user_mapping)
{
	if (mm->context.ldt) {
		/*
		 * We already had an LDT. The top-level entry should already
		 * have been allocated and synchronized with the usermode
		 * tables.
		 */
		WARN_ON(!had_kernel_mapping);
		if (static_cpu_has(X86_FEATURE_PTI))
			WARN_ON(!had_user_mapping);
	} else {
		/*
		 * This is the first time we're mapping an LDT for this process.
		 * Sync the pgd to the usermode tables.
		 */
		WARN_ON(had_kernel_mapping);
		if (static_cpu_has(X86_FEATURE_PTI))
			WARN_ON(had_user_mapping);
	}
}

static void map_ldt_struct_to_user(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);

	if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
		set_pgd(kernel_to_user_pgdp(pgd), *pgd);
}

static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
	bool had_kernel = (pgd->pgd != 0);
	bool had_user = (kernel_to_user_pgdp(pgd)->pgd != 0);

	do_sanity_check(mm, had_kernel, had_user);
}

/*
 * If PTI is enabled, this maps the LDT into the kernelmode and
 * usermode tables for the given mm.
 *
 * There is no corresponding unmap function. Even if the LDT is freed, we
 * leave the PTEs around until the slot is reused or the mm is destroyed.
 * This is harmless: the LDT is always in ordinary memory, and no one will
 * access the freed slot.
 *
 * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
 * it useful, and the flush would slow down modify_ldt().
 */
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
	unsigned long va;
	bool is_vmalloc;
	spinlock_t *ptl;
	pgd_t *pgd;
	int i;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return 0;

	/*
	 * Any given ldt_struct should have map_ldt_struct() called at most
	 * once.
	 */
	WARN_ON(ldt->slot != -1);

	/* Check if the current mappings are sane */
	sanity_check_ldt_mapping(mm);

	/*
	 * Did we already have the top level entry allocated? We can't
	 * use pgd_none() for this because it doesn't do anything on
	 * 4-level page table kernels.
	 */
	pgd = pgd_offset(mm, LDT_BASE_ADDR);

	is_vmalloc = is_vmalloc_addr(ldt->entries);

	for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		const void *src = (char *)ldt->entries + offset;
		unsigned long pfn;
		pgprot_t pte_prot;
		pte_t pte, *ptep;

		va = (unsigned long)ldt_slot_va(slot) + offset;
		pfn = is_vmalloc ? vmalloc_to_pfn(src) :
			page_to_pfn(virt_to_page(src));
		/*
		 * Treat the PTI LDT range as a *userspace* range.
		 * get_locked_pte() will allocate all needed pagetables
		 * and account for them in this mm.
		 */
		ptep = get_locked_pte(mm, va, &ptl);
		if (!ptep)
			return -ENOMEM;
		/*
		 * Map it RO so the easy-to-find address is not a primary
		 * target via some kernel interface which misses a
		 * permission check.
		 */
		pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL);
		/* Filter out unsupported __PAGE_KERNEL* bits: */
		pgprot_val(pte_prot) &= __supported_pte_mask;
		pte = pfn_pte(pfn, pte_prot);
		set_pte_at(mm, va, ptep, pte);
		pte_unmap_unlock(ptep, ptl);
	}

	/* Propagate LDT mapping to the user page-table */
	map_ldt_struct_to_user(mm);

	va = (unsigned long)ldt_slot_va(slot);
	flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);

	ldt->slot = slot;
	return 0;
}

#else /* !CONFIG_PAGE_TABLE_ISOLATION */

static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
	return 0;
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	struct mmu_gather tlb;
	unsigned long start = LDT_BASE_ADDR;
	unsigned long end = LDT_END_ADDR;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	tlb_gather_mmu(&tlb, mm, start, end);
	free_pgd_range(&tlb, start, end, start, end);
	tlb_finish_mmu(&tlb, start, end);
#endif
}

/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}

static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
	mutex_lock(&mm->context.lock);

	/* Synchronizes with READ_ONCE in load_mm_ldt. */
	smp_store_release(&mm->context.ldt, ldt);

	/* Activate the LDT for all CPUs using current's mm. */
	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);

	mutex_unlock(&mm->context.lock);
}

static void free_ldt_struct(struct ldt_struct *ldt)
{
	if (likely(!ldt))
		return;

	paravirt_free_ldt(ldt->entries, ldt->nr_entries);
	if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
		vfree_atomic(ldt->entries);
	else
		free_page((unsigned long)ldt->entries);
	kfree(ldt);
}

/*
 * Called on fork from arch_dup_mmap(). Just copy the current LDT state;
 * the new task is not running, so nothing can be installed.
 */
int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ldt_struct *new_ldt;
	int retval = 0;

	if (!old_mm)
		return 0;

	mutex_lock(&old_mm->context.lock);
	if (!old_mm->context.ldt)
		goto out_unlock;

	new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
	if (!new_ldt) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
	       new_ldt->nr_entries * LDT_ENTRY_SIZE);
	finalize_ldt_struct(new_ldt);

	retval = map_ldt_struct(mm, new_ldt, 0);
	if (retval) {
		free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}
	mm->context.ldt = new_ldt;

out_unlock:
	mutex_unlock(&old_mm->context.lock);
	return retval;
}

/*
 * No need to lock the MM as we are the last user
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
	free_ldt_struct(mm->context.ldt);
	mm->context.ldt = NULL;
}

void ldt_arch_exit_mmap(struct mm_struct *mm)
{
	free_ldt_pgtables(mm);
}

static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	struct mm_struct *mm = current->mm;
	unsigned long entries_size;
	int retval;

	down_read(&mm->context.ldt_usr_sem);

	if (!mm->context.ldt) {
		retval = 0;
		goto out_unlock;
	}

	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

	entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
	if (entries_size > bytecount)
		entries_size = bytecount;

	if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
		retval = -EFAULT;
		goto out_unlock;
	}

	if (entries_size != bytecount) {
		/* Zero-fill the rest and pretend we read bytecount bytes. */
		if (clear_user(ptr + entries_size, bytecount - entries_size)) {
			retval = -EFAULT;
			goto out_unlock;
		}
	}
	retval = bytecount;

out_unlock:
	up_read(&mm->context.ldt_usr_sem);
	return retval;
}

static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	/* CHECKME: Can we use _one_ random number? */
#ifdef CONFIG_X86_32
	unsigned long size = 5 * sizeof(struct desc_struct);
#else
	unsigned long size = 128;
#endif
	if (bytecount > size)
		bytecount = size;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}

static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct ldt_struct *new_ldt, *old_ldt;
	unsigned int old_nr_entries, new_nr_entries;
	struct user_desc ldt_info;
	struct desc_struct ldt;
	int error;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
	    LDT_empty(&ldt_info)) {
		/* The user wants to clear the entry. */
		memset(&ldt, 0, sizeof(ldt));
	} else {
		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
			error = -EINVAL;
			goto out;
		}

		fill_ldt(&ldt, &ldt_info);
		if (oldmode)
			ldt.avl = 0;
	}

	if (down_write_killable(&mm->context.ldt_usr_sem))
		return -EINTR;

	old_ldt = mm->context.ldt;
	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
	new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

	error = -ENOMEM;
	new_ldt = alloc_ldt_struct(new_nr_entries);
	if (!new_ldt)
		goto out_unlock;

	if (old_ldt)
		memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

	new_ldt->entries[ldt_info.entry_number] = ldt;
	finalize_ldt_struct(new_ldt);

	/*
	 * If we are using PTI, map the new LDT into the userspace pagetables.
	 * If there is already an LDT, use the other slot so that other CPUs
	 * will continue to use the old LDT until install_ldt() switches
	 * them over to the new LDT.
	 */
	error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
	if (error) {
		/*
		 * This can only fail for the first LDT setup. If an LDT is
		 * already installed then the PTE page is already
		 * populated. Mop up a half populated page table.
		 */
		if (!WARN_ON_ONCE(old_ldt))
			free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}

	install_ldt(mm, new_ldt);
	free_ldt_struct(old_ldt);
	error = 0;

out_unlock:
	up_write(&mm->context.ldt_usr_sem);
out:
	return error;
}

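/*
 * Illustrative sketch only (not part of the kernel build): the struct
 * user_desc request that write_ldt() above consumes, as a userspace caller
 * of modify_ldt() might fill it in. The values are an assumed example --
 * a present, flat 32-bit data segment in LDT entry 0:
 *
 *	struct user_desc desc = {
 *		.entry_number    = 0,
 *		.base_addr       = 0,
 *		.limit           = 0xfffff,	// 4 GiB with limit_in_pages
 *		.seg_32bit       = 1,
 *		.contents        = 0,		// MODIFY_LDT_CONTENTS_DATA
 *		.read_exec_only  = 0,
 *		.limit_in_pages  = 1,
 *		.seg_not_present = 0,
 *		.useable         = 1,
 *	};
 *
 * fill_ldt() turns such a request into the hardware descriptor that ends up
 * in new_ldt->entries[] above.
 */
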
SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
		unsigned long , bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	/*
	 * The SYSCALL_DEFINE() macros give us an 'unsigned long'
	 * return type, but the ABI for sys_modify_ldt() expects
	 * 'int'. This cast gives us an int-sized value in %rax
	 * for the return code. The 'unsigned' is necessary so
	 * the compiler does not try to sign-extend the negative
	 * return codes into the high half of the register when
	 * taking the value from int->long.
	 */
	return (unsigned int)ret;
}
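
/*
 * Usage sketch (userspace, illustrative only; the snippet below is not part
 * of this file): installing the example descriptor shown above write_ldt()
 * via func 0x11 and reading the table back via func 0. modify_ldt() has no
 * glibc wrapper, so the raw syscall(2) interface is used; error handling is
 * omitted and <asm/ldt.h> is assumed for struct user_desc.
 *
 *	#include <asm/ldt.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int install_ldt_entry(const struct user_desc *desc)
 *	{
 *		// func 0x11: write_ldt() in "new mode" (oldmode == 0)
 *		return syscall(SYS_modify_ldt, 0x11, desc, sizeof(*desc));
 *	}
 *
 *	static int dump_ldt(void *buf, unsigned long len)
 *	{
 *		// func 0: read_ldt(); zero-fills past nr_entries and
 *		// returns the requested byte count on success.
 *		return syscall(SYS_modify_ldt, 0, buf, len);
 *	}
 *
 * A selector for LDT entry N at CPL 3 is (N << 3) | 4 (TI = LDT) | 3 (RPL),
 * so entry 0 is selector 0x07, e.g. for a subsequent %fs or %gs load.
 */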