// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 * Andi Kleen.
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/resctrl.h>
#include <asm/unistd.h>
#include <asm/fsgsbase.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif

#include "process.h"

/* Also prints some state that isn't saved in the pt_regs. */
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, es;

	show_iret_regs(regs);

	if (regs->orig_ax != -1)
		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
	else
		pr_cont("\n");

	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	if (mode == SHOW_REGS_SHORT)
		return;

	if (mode == SHOW_REGS_USER) {
		rdmsrl(MSR_FS_BASE, fs);
		rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
		printk(KERN_DEFAULT "FS: %016lx GS: %016lx\n",
		       fs, shadowgs);
		return;
	}

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = __read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS: %04lx DS: %04x ES: %04x CR0: %016lx\n", regs->cs, ds,
	       es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
	       cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	      (d6 == DR6_RESERVED) && (d7 == 0x400))) {
		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
		       d0, d1, d2);
		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
		       d3, d6, d7);
	}

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}

void release_thread(struct task_struct *dead_task)
{
	WARN_ON(dead_task->mm);
}

enum which_selector {
	FS,
	GS
};

/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available. The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
					     unsigned short selector,
					     enum which_selector which)
{
	if (likely(selector == 0)) {
		/*
		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
		 * be the pre-existing saved base or it could be zero. On AMD
		 * (with X86_BUG_NULL_SEG), the segment base could be almost
		 * anything.
		 *
		 * This branch is very hot (it's hit twice on almost every
		 * context switch between 64-bit programs), and avoiding
		 * the RDMSR helps a lot, so we just assume that whatever
		 * value is already saved is correct. This matches historical
		 * Linux behavior, so it won't break existing applications.
		 *
		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
		 * report that the base is zero, it needs to actually be zero:
		 * see the corresponding logic in load_seg_legacy.
		 */
	} else {
		/*
		 * If the selector is 1, 2, or 3, then the base is zero on
		 * !X86_BUG_NULL_SEG CPUs and could be anything on
		 * X86_BUG_NULL_SEG CPUs. In the latter case, Linux
		 * has never attempted to preserve the base across context
		 * switches.
		 *
		 * If selector > 3, then it refers to a real segment, and
		 * saving the base isn't necessary.
		 */
		if (which == FS)
			prev_p->thread.fsbase = 0;
		else
			prev_p->thread.gsbase = 0;
	}
}

static __always_inline void save_fsgs(struct task_struct *task)
{
	savesegment(fs, task->thread.fsindex);
	savesegment(gs, task->thread.gsindex);
	save_base_legacy(task, task->thread.fsindex, FS);
	save_base_legacy(task, task->thread.gsindex, GS);
}

#if IS_ENABLED(CONFIG_KVM)
/*
 * While a process is running, current->thread.fsbase and
 * current->thread.gsbase may not match the corresponding CPU registers (see
 * save_base_legacy()). KVM wants an efficient way to save and restore FSBASE
 * and GSBASE. When FSGSBASE extensions are enabled, this will have to use
 * RD{FS,GS}BASE.
 */
void save_fsgs_for_kvm(void)
{
	save_fsgs(current);
}
EXPORT_SYMBOL_GPL(save_fsgs_for_kvm);
#endif
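
/*
 * In short, on !FSGSBASE hardware the save path above leaves
 * thread.{fs,gs}base in one of three states:
 *
 *   selector == 0        base left untouched (assumed still correct)
 *   selector 1, 2 or 3   base forced to 0 (never preserved across switches)
 *   selector > 3         base forced to 0; the real base lives in the GDT or
 *                        LDT descriptor and is picked up again on reload
 */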

static __always_inline void loadseg(enum which_selector which,
				    unsigned short sel)
{
	if (which == FS)
		loadsegment(fs, sel);
	else
		load_gs_index(sel);
}

static __always_inline void load_seg_legacy(unsigned short prev_index,
					    unsigned long prev_base,
					    unsigned short next_index,
					    unsigned long next_base,
					    enum which_selector which)
{
	if (likely(next_index <= 3)) {
		/*
		 * The next task is using 64-bit TLS, is not using this
		 * segment at all, or is having fun with arcane CPU features.
		 */
		if (next_base == 0) {
			/*
			 * Nasty case: on AMD CPUs, we need to forcibly zero
			 * the base.
			 */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				loadseg(which, __USER_DS);
				loadseg(which, next_index);
			} else {
				/*
				 * We could try to exhaustively detect cases
				 * under which we can skip the segment load,
				 * but there's really only one case that matters
				 * for performance: if both the previous and
				 * next states are fully zeroed, we can skip
				 * the load.
				 *
				 * (This assumes that prev_base == 0 has no
				 * false positives. This is the case on
				 * Intel-style CPUs.)
				 */
				if (likely(prev_index | next_index | prev_base))
					loadseg(which, next_index);
			}
		} else {
			if (prev_index != next_index)
				loadseg(which, next_index);
			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
			       next_base);
		}
	} else {
		/*
		 * The next task is using a real segment. Loading the selector
		 * is sufficient.
		 */
		loadseg(which, next_index);
	}
}

static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
					      struct thread_struct *next)
{
	load_seg_legacy(prev->fsindex, prev->fsbase,
			next->fsindex, next->fsbase, FS);
	load_seg_legacy(prev->gsindex, prev->gsbase,
			next->gsindex, next->gsbase, GS);
}

static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
					    unsigned short selector)
{
	unsigned short idx = selector >> 3;
	unsigned long base;

	if (likely((selector & SEGMENT_TI_MASK) == 0)) {
		if (unlikely(idx >= GDT_ENTRIES))
			return 0;

		/*
		 * There are no user segments in the GDT with nonzero bases
		 * other than the TLS segments.
		 */
		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
			return 0;

		idx -= GDT_ENTRY_TLS_MIN;
		base = get_desc_base(&task->thread.tls_array[idx]);
	} else {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		struct ldt_struct *ldt;

		/*
		 * If performance here mattered, we could protect the LDT
		 * with RCU. This is a slow path, though, so we can just
		 * take the mutex.
		 */
		mutex_lock(&task->mm->context.lock);
		ldt = task->mm->context.ldt;
		/* The task may have an LDT-style selector but no LDT at all. */
		if (unlikely(!ldt || idx >= ldt->nr_entries))
			base = 0;
		else
			base = get_desc_base(ldt->entries + idx);
		mutex_unlock(&task->mm->context.lock);
#else
		base = 0;
#endif
	}

	return base;
}
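
/*
 * A worked example of the selector decoding above: bits 15:3 of a selector
 * are the descriptor index, bit 2 (SEGMENT_TI_MASK) picks the LDT (1) or the
 * GDT (0), and bits 1:0 are the RPL. So selector 0x63, typically used by
 * 32-bit tasks for their first TLS entry, decodes to index 12
 * (GDT_ENTRY_TLS_MIN), table = GDT, RPL = 3, and the base is fetched from
 * tls_array[0].
 */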

unsigned long x86_fsbase_read_task(struct task_struct *task)
{
	unsigned long fsbase;

	if (task == current)
		fsbase = x86_fsbase_read_cpu();
	else if (task->thread.fsindex == 0)
		fsbase = task->thread.fsbase;
	else
		fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);

	return fsbase;
}

unsigned long x86_gsbase_read_task(struct task_struct *task)
{
	unsigned long gsbase;

	if (task == current)
		gsbase = x86_gsbase_read_cpu_inactive();
	else if (task->thread.gsindex == 0)
		gsbase = task->thread.gsbase;
	else
		gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);

	return gsbase;
}

void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
{
	WARN_ON_ONCE(task == current);

	task->thread.fsbase = fsbase;
}

void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
{
	WARN_ON_ONCE(task == current);

	task->thread.gsbase = gsbase;
}

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	WARN_ON_ONCE(regs != current_pt_regs());

	if (static_cpu_has(X86_BUG_NULL_SEG)) {
		/* Loading zero below won't clear the base. */
		loadsegment(fs, __USER_DS);
		load_gs_index(__USER_DS);
	}

	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);

	regs->ip = new_ip;
	regs->sp = new_sp;
	regs->cs = _cs;
	regs->ss = _ss;
	regs->flags = X86_EFLAGS_IF;
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
EXPORT_SYMBOL_GPL(start_thread);

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer is not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();

	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
		     this_cpu_read(irq_count) != -1);

	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
		switch_fpu_prepare(prev_fpu, cpu);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	save_fsgs(prev_p);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here. This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them.
	 */
	arch_end_context_switch(next_p);

	/* Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT. The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	x86_fsgsbase_load(prev, next);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);
	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));

	switch_fpu_finish(next_fpu);

	/* Reload sp0. */
	update_task_stack(next_p);

	switch_to_extra(prev_p, next_p);

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor. As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths. Instead, we ensure that SS is never NULL in
		 * system call context. We do this by replacing NULL SS
		 * selectors at every context switch. SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt. Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes. Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	/* Load the Intel cache allocation PQR MSR. */
	resctrl_sched_in();

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);
	/* Pretend that this comes from a 64bit execve */
	task_pt_regs(current)->orig_ax = __NR_execve;
	current_thread_info()->status &= ~TS_COMPAT;

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/*
	 * TBD: overwrites user setup. Should have two bits.
	 * But 64bit processes have always behaved this way,
	 * so it's not too bad. The main problem is just that
	 * 32bit children are affected again.
	 */
	current->personality &= ~READ_IMPLIES_EXEC;
}

static void __set_personality_x32(void)
{
#ifdef CONFIG_X86_X32
	clear_thread_flag(TIF_IA32);
	set_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_X32;
	current->personality &= ~READ_IMPLIES_EXEC;
	/*
	 * in_32bit_syscall() uses the presence of the x32 syscall bit
	 * flag to determine compat status. The x86 mmap() code relies on
	 * the syscall bitness, so set the x32 syscall bit right here to make
	 * in_32bit_syscall() work during exec().
	 *
	 * Pretend to come from an x32 execve.
	 */
	task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
	current_thread_info()->status &= ~TS_COMPAT;
#endif
}

static void __set_personality_ia32(void)
{
#ifdef CONFIG_IA32_EMULATION
	set_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_IA32;
	current->personality |= force_personality32;
	/* Prepare the first "return" to user space */
	task_pt_regs(current)->orig_ax = __NR_ia32_execve;
	current_thread_info()->status |= TS_COMPAT;
#endif
}

void set_personality_ia32(bool x32)
{
	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	if (x32)
		__set_personality_x32();
	else
		__set_personality_ia32();
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret;

	ret = map_vdso_once(image, addr);
	if (ret)
		return ret;

	return (long)image->size;
}
#endif

long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
{
	int ret = 0;

	switch (option) {
	case ARCH_SET_GS: {
		if (unlikely(arg2 >= TASK_SIZE_MAX))
			return -EPERM;

		preempt_disable();
		/*
		 * ARCH_SET_GS has always overwritten the index
		 * and the base. Zero is the most sensible value
		 * to put in the index, and is the only value that
		 * makes any sense if FSGSBASE is unavailable.
		 */
		if (task == current) {
			loadseg(GS, 0);
			x86_gsbase_write_cpu_inactive(arg2);

			/*
			 * On non-FSGSBASE systems, save_base_legacy() expects
			 * that we also fill in thread.gsbase.
			 */
			task->thread.gsbase = arg2;

		} else {
			task->thread.gsindex = 0;
			x86_gsbase_write_task(task, arg2);
		}
		preempt_enable();
		break;
	}
	case ARCH_SET_FS: {
		/*
		 * Not strictly needed for %fs, but do it for symmetry
		 * with %gs.
		 */
		if (unlikely(arg2 >= TASK_SIZE_MAX))
			return -EPERM;

		preempt_disable();
		/*
		 * Set the selector to 0 for the same reason
		 * as %gs above.
		 */
		if (task == current) {
			loadseg(FS, 0);
			x86_fsbase_write_cpu(arg2);

			/*
			 * On non-FSGSBASE systems, save_base_legacy() expects
			 * that we also fill in thread.fsbase.
			 */
			task->thread.fsbase = arg2;
		} else {
			task->thread.fsindex = 0;
			x86_fsbase_write_task(task, arg2);
		}
		preempt_enable();
		break;
	}
	case ARCH_GET_FS: {
		unsigned long base = x86_fsbase_read_task(task);

		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base = x86_gsbase_read_task(task);

		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, arg2);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, arg2);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, arg2);
#endif

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
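
/*
 * Illustrative only (a userspace sketch, not part of this file): the
 * ARCH_SET_GS/ARCH_GET_GS cases above are what a 64-bit program exercises
 * with code roughly like the following. The ARCH_* constants come from
 * <asm/prctl.h>, the raw syscall is used because the C library does not
 * declare a wrapper, and error handling is trimmed. GS is the convenient
 * register to play with here, since FS is normally claimed by the C library
 * for TLS.
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	static unsigned long area[64];
 *
 *	int main(void)
 *	{
 *		unsigned long base = 0;
 *
 *		// Point the GS base at a private per-thread area.
 *		syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)area);
 *
 *		// Read it back; the kernel stores the value through arg2.
 *		syscall(SYS_arch_prctl, ARCH_GET_GS, &base);
 *		printf("GS base = %#lx\n", base);
 *		return 0;
 *	}
 */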

SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	long ret;

	ret = do_arch_prctl_64(current, option, arg2);
	if (ret == -EINVAL)
		ret = do_arch_prctl_common(current, option, arg2);

	return ret;
}

#ifdef CONFIG_IA32_EMULATION
COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	return do_arch_prctl_common(current, option, arg2);
}
#endif

unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}