// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 *
 * Lock order:
 *	context.ldt_usr_sem
 *	  mmap_sem
 *	    context.lock
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/tlb.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>

static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
	unsigned short sel;

	/*
	 * Make sure that the cached DS and ES descriptors match the updated
	 * LDT.
	 */
	savesegment(ds, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(ds, sel);

	savesegment(es, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(es, sel);
#endif
}

/* context.lock is held by the task which issued the smp function call */
static void flush_ldt(void *__mm)
{
	struct mm_struct *mm = __mm;

	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
		return;

	load_mm_ldt(mm);

	refresh_ldt_segments();
}

/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
	struct ldt_struct *new_ldt;
	unsigned int alloc_size;

	if (num_entries > LDT_ENTRIES)
		return NULL;

	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
	if (!new_ldt)
		return NULL;

	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
	alloc_size = num_entries * LDT_ENTRY_SIZE;

	/*
	 * Xen is very picky: it requires a page-aligned LDT that has no
	 * trailing nonzero bytes in any page that contains LDT descriptors.
	 * Keep it simple: zero the whole allocation and never allocate less
	 * than PAGE_SIZE.
	 */
	if (alloc_size > PAGE_SIZE)
		new_ldt->entries = vzalloc(alloc_size);
	else
		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

	if (!new_ldt->entries) {
		kfree(new_ldt);
		return NULL;
	}

	/* The new LDT isn't aliased for PTI yet. */
	new_ldt->slot = -1;

	new_ldt->nr_entries = num_entries;
	return new_ldt;
}
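
/*
 * Sizing note (a sketch, not load-bearing): with the usual x86 values of
 * LDT_ENTRIES == 8192 and LDT_ENTRY_SIZE == 8, a full LDT is 64 KiB, i.e.
 * 16 pages.  Anything past 512 entries therefore takes the vzalloc() path
 * above, while smaller tables fit in the single zeroed page.
 */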

#ifdef CONFIG_PAGE_TABLE_ISOLATION

static void do_sanity_check(struct mm_struct *mm,
			    bool had_kernel_mapping,
			    bool had_user_mapping)
{
	if (mm->context.ldt) {
		/*
		 * We already had an LDT.  The top-level entry should already
		 * have been allocated and synchronized with the usermode
		 * tables.
		 */
		WARN_ON(!had_kernel_mapping);
		if (static_cpu_has(X86_FEATURE_PTI))
			WARN_ON(!had_user_mapping);
	} else {
		/*
		 * This is the first time we're mapping an LDT for this
		 * process.  Sync the pgd to the usermode tables.
		 */
		WARN_ON(had_kernel_mapping);
		if (static_cpu_has(X86_FEATURE_PTI))
			WARN_ON(had_user_mapping);
	}
}

#ifdef CONFIG_X86_PAE

static pmd_t *pgd_to_pmd_walk(pgd_t *pgd, unsigned long va)
{
	p4d_t *p4d;
	pud_t *pud;

	if (pgd->pgd == 0)
		return NULL;

	p4d = p4d_offset(pgd, va);
	if (p4d_none(*p4d))
		return NULL;

	pud = pud_offset(p4d, va);
	if (pud_none(*pud))
		return NULL;

	return pmd_offset(pud, va);
}

static void map_ldt_struct_to_user(struct mm_struct *mm)
{
	pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	pmd_t *k_pmd, *u_pmd;

	k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
	u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);

	if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
		set_pmd(u_pmd, *k_pmd);
}

static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
	pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	bool had_kernel, had_user;
	pmd_t *k_pmd, *u_pmd;

	k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
	u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
	had_kernel = (k_pmd->pmd != 0);
	had_user   = (u_pmd->pmd != 0);

	do_sanity_check(mm, had_kernel, had_user);
}

#else /* !CONFIG_X86_PAE */

static void map_ldt_struct_to_user(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);

	if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
		set_pgd(kernel_to_user_pgdp(pgd), *pgd);
}

static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
	bool had_kernel = (pgd->pgd != 0);
	bool had_user   = (kernel_to_user_pgdp(pgd)->pgd != 0);

	do_sanity_check(mm, had_kernel, had_user);
}

#endif /* CONFIG_X86_PAE */
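
/*
 * Background for the helpers above (a sketch; the exact mechanics live in
 * the pgtable headers, not here): with PTI every mm carries two top-level
 * page tables, one used in kernel mode and a shadow used in user mode.
 * kernel_to_user_pgdp() maps a kernel-table pgd pointer to the matching
 * slot in the user table; on 64-bit the two tables sit in adjacent pages,
 * so this amounts to toggling a single address bit.
 */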

/*
 * If PTI is enabled, this maps the LDT into the kernelmode and
 * usermode tables for the given mm.
 */
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
	unsigned long va;
	bool is_vmalloc;
	spinlock_t *ptl;
	int i, nr_pages;
	pgd_t *pgd;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return 0;

	/*
	 * Any given ldt_struct should have map_ldt_struct() called at most
	 * once.
	 */
	WARN_ON(ldt->slot != -1);

	/* Check if the current mappings are sane */
	sanity_check_ldt_mapping(mm);

	/*
	 * Did we already have the top level entry allocated?  We can't
	 * use pgd_none() for this because it doesn't do anything on
	 * 4-level page table kernels.
	 */
	pgd = pgd_offset(mm, LDT_BASE_ADDR);

	is_vmalloc = is_vmalloc_addr(ldt->entries);

	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

	for (i = 0; i < nr_pages; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		const void *src = (char *)ldt->entries + offset;
		unsigned long pfn;
		pgprot_t pte_prot;
		pte_t pte, *ptep;

		va = (unsigned long)ldt_slot_va(slot) + offset;
		pfn = is_vmalloc ? vmalloc_to_pfn(src) :
			page_to_pfn(virt_to_page(src));
		/*
		 * Treat the PTI LDT range as a *userspace* range.
		 * get_locked_pte() will allocate all needed pagetables
		 * and account for them in this mm.
		 */
		ptep = get_locked_pte(mm, va, &ptl);
		if (!ptep)
			return -ENOMEM;
		/*
		 * Map it RO so the easy-to-find address is not a primary
		 * target via some kernel interface which misses a
		 * permission check.
		 */
		pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL);
		/* Filter out unsupported __PAGE_KERNEL* bits: */
		pgprot_val(pte_prot) &= __supported_pte_mask;
		pte = pfn_pte(pfn, pte_prot);
		set_pte_at(mm, va, ptep, pte);
		pte_unmap_unlock(ptep, ptl);
	}

	/* Propagate LDT mapping to the user page-table */
	map_ldt_struct_to_user(mm);

	ldt->slot = slot;
	return 0;
}

static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
	unsigned long va;
	int i, nr_pages;

	if (!ldt)
		return;

	/* LDT map/unmap is only required for PTI */
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

	for (i = 0; i < nr_pages; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		spinlock_t *ptl;
		pte_t *ptep;

		va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
		ptep = get_locked_pte(mm, va, &ptl);
		pte_clear(mm, va, ptep);
		pte_unmap_unlock(ptep, ptl);
	}

	va = (unsigned long)ldt_slot_va(ldt->slot);
	flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
}

#else /* !CONFIG_PAGE_TABLE_ISOLATION */

static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
	return 0;
}

static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	struct mmu_gather tlb;
	unsigned long start = LDT_BASE_ADDR;
	unsigned long end = LDT_END_ADDR;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	tlb_gather_mmu(&tlb, mm, start, end);
	free_pgd_range(&tlb, start, end, start, end);
	tlb_finish_mmu(&tlb, start, end);
#endif
}

/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}

static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
	mutex_lock(&mm->context.lock);

	/* Synchronizes with READ_ONCE in load_mm_ldt. */
	smp_store_release(&mm->context.ldt, ldt);

	/* Activate the LDT for all CPUs using current's mm. */
	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);

	mutex_unlock(&mm->context.lock);
}
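
/*
 * For reference, a rough sketch of the reader that the release above pairs
 * with.  The real load_mm_ldt() lives in asm/mmu_context.h; it boils down
 * to approximately:
 *
 *	struct ldt_struct *ldt = READ_ONCE(mm->context.ldt);
 *
 *	if (ldt)
 *		set_ldt(ldt->entries, ldt->nr_entries);
 *	else
 *		clear_LDT();
 *
 * The smp_store_release() orders the memcpy/finalize of the new table
 * before the pointer publication, so a racing reader never loads a
 * half-initialized LDT.
 */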

static void free_ldt_struct(struct ldt_struct *ldt)
{
	if (likely(!ldt))
		return;

	paravirt_free_ldt(ldt->entries, ldt->nr_entries);
	if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
		vfree_atomic(ldt->entries);
	else
		free_page((unsigned long)ldt->entries);
	kfree(ldt);
}

/*
 * Called on fork from arch_dup_mmap().  Just copy the current LDT state;
 * the new task is not running, so nothing can be installed.
 */
int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ldt_struct *new_ldt;
	int retval = 0;

	if (!old_mm)
		return 0;

	mutex_lock(&old_mm->context.lock);
	if (!old_mm->context.ldt)
		goto out_unlock;

	new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
	if (!new_ldt) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
	       new_ldt->nr_entries * LDT_ENTRY_SIZE);
	finalize_ldt_struct(new_ldt);

	retval = map_ldt_struct(mm, new_ldt, 0);
	if (retval) {
		free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}
	mm->context.ldt = new_ldt;

out_unlock:
	mutex_unlock(&old_mm->context.lock);
	return retval;
}

/*
 * No need to lock the MM as we are the last user.
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
	free_ldt_struct(mm->context.ldt);
	mm->context.ldt = NULL;
}

void ldt_arch_exit_mmap(struct mm_struct *mm)
{
	free_ldt_pgtables(mm);
}

static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	struct mm_struct *mm = current->mm;
	unsigned long entries_size;
	int retval;

	down_read(&mm->context.ldt_usr_sem);

	if (!mm->context.ldt) {
		retval = 0;
		goto out_unlock;
	}

	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

	entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
	if (entries_size > bytecount)
		entries_size = bytecount;

	if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
		retval = -EFAULT;
		goto out_unlock;
	}

	if (entries_size != bytecount) {
		/* Zero-fill the rest and pretend we read bytecount bytes. */
		if (clear_user(ptr + entries_size, bytecount - entries_size)) {
			retval = -EFAULT;
			goto out_unlock;
		}
	}
	retval = bytecount;

out_unlock:
	up_read(&mm->context.ldt_usr_sem);
	return retval;
}
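
/*
 * Note the contract above: on success read_ldt() returns the (possibly
 * clamped) bytecount, with any bytes past the populated entries zero
 * filled, so a caller cannot distinguish "no entry" from an explicitly
 * cleared descriptor.  An mm that never had an LDT reads as 0 bytes.
 */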

static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	/* CHECKME: Can we use _one_ random number? */
#ifdef CONFIG_X86_32
	unsigned long size = 5 * sizeof(struct desc_struct);
#else
	unsigned long size = 128;
#endif
	if (bytecount > size)
		bytecount = size;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}

static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct ldt_struct *new_ldt, *old_ldt;
	unsigned int old_nr_entries, new_nr_entries;
	struct user_desc ldt_info;
	struct desc_struct ldt;
	int error;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
	    LDT_empty(&ldt_info)) {
		/* The user wants to clear the entry. */
		memset(&ldt, 0, sizeof(ldt));
	} else {
		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
			error = -EINVAL;
			goto out;
		}

		fill_ldt(&ldt, &ldt_info);
		if (oldmode)
			ldt.avl = 0;
	}

	if (down_write_killable(&mm->context.ldt_usr_sem))
		return -EINTR;

	old_ldt = mm->context.ldt;
	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
	new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

	error = -ENOMEM;
	new_ldt = alloc_ldt_struct(new_nr_entries);
	if (!new_ldt)
		goto out_unlock;

	if (old_ldt)
		memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

	new_ldt->entries[ldt_info.entry_number] = ldt;
	finalize_ldt_struct(new_ldt);

	/*
	 * If we are using PTI, map the new LDT into the userspace pagetables.
	 * If there is already an LDT, use the other slot so that other CPUs
	 * will continue to use the old LDT until install_ldt() switches
	 * them over to the new LDT.
	 */
	error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
	if (error) {
		/*
		 * This can only fail for the first LDT setup.  If an LDT is
		 * already installed then the PTE page is already
		 * populated.  Mop up a half-populated page table.
		 */
		if (!WARN_ON_ONCE(old_ldt))
			free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}

	install_ldt(mm, new_ldt);
	unmap_ldt_struct(mm, old_ldt);
	free_ldt_struct(old_ldt);
	error = 0;

out_unlock:
	up_write(&mm->context.ldt_usr_sem);
out:
	return error;
}

SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
		unsigned long , bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	/*
	 * The SYSCALL_DEFINE() macros give us an 'unsigned long'
	 * return type, but the ABI for sys_modify_ldt() expects
	 * 'int'.  This cast gives us an int-sized value in %rax
	 * for the return code.  The 'unsigned' is necessary so
	 * the compiler does not try to sign-extend the negative
	 * return codes into the high half of the register when
	 * taking the value from int->long.
	 */
	return (unsigned int)ret;
}
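
/*
 * For illustration, a hedged sketch of the userspace side (not part of
 * this file).  There is no glibc wrapper for modify_ldt, so callers go
 * through syscall(2) with a struct user_desc from <asm/ldt.h>; the base
 * and limit values below are placeholders:
 *
 *	#include <asm/ldt.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct user_desc desc = {
 *		.entry_number	= 0,
 *		.base_addr	= 0x1000,
 *		.limit		= 0xfffff,
 *		.seg_32bit	= 1,
 *		.limit_in_pages	= 1,
 *		.useable	= 1,
 *	};
 *	int err = syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
 *
 * func 0x11 is the "new mode" write handled above (oldmode == 0), so the
 * AVL bit from 'useable' is preserved; func 1 is the legacy write that
 * clears it.
 */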