/* xref: /openbmc/linux/arch/x86/kernel/process_64.c (revision 15d90a6a) */
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/resctrl_sched.h>
#include <asm/unistd.h>
#include <asm/fsgsbase.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif

#include "process.h"

/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, es;

	show_iret_regs(regs);

	if (regs->orig_ax != -1)
		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
	else
		pr_cont("\n");

	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	if (mode == SHOW_REGS_SHORT)
		return;

	if (mode == SHOW_REGS_USER) {
		rdmsrl(MSR_FS_BASE, fs);
		rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
		printk(KERN_DEFAULT "FS:  %016lx GS:  %016lx\n",
		       fs, shadowgs);
		return;
	}

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = __read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS:  %04lx DS: %04x ES: %04x CR0: %016lx\n", regs->cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
		       d0, d1, d2);
		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
		       d3, d6, d7);
	}

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		if (dead_task->mm->context.ldt) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt->entries,
				dead_task->mm->context.ldt->nr_entries);
			BUG();
		}
#endif
	}
}

enum which_selector {
	FS,
	GS
};

/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available.  The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
					     unsigned short selector,
					     enum which_selector which)
{
	if (likely(selector == 0)) {
		/*
		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
		 * be the pre-existing saved base or it could be zero.  On AMD
		 * (with X86_BUG_NULL_SEG), the segment base could be almost
		 * anything.
		 *
		 * This branch is very hot (it's hit twice on almost every
		 * context switch between 64-bit programs), and avoiding
		 * the RDMSR helps a lot, so we just assume that whatever
		 * value is already saved is correct.  This matches historical
		 * Linux behavior, so it won't break existing applications.
		 *
		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
		 * report that the base is zero, it needs to actually be zero:
		 * see the corresponding logic in load_seg_legacy.
		 */
	} else {
		/*
		 * If the selector is 1, 2, or 3, then the base is zero on
		 * !X86_BUG_NULL_SEG CPUs and could be anything on
		 * X86_BUG_NULL_SEG CPUs.  In the latter case, Linux
		 * has never attempted to preserve the base across context
		 * switches.
		 *
		 * If selector > 3, then it refers to a real segment, and
		 * saving the base isn't necessary.
		 */
		if (which == FS)
			prev_p->thread.fsbase = 0;
		else
			prev_p->thread.gsbase = 0;
	}
}
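
/*
 * Concrete example for save_base_legacy(): a 64-bit task that set FS via
 * arch_prctl(ARCH_SET_FS) has fsindex == 0, so the hot branch above leaves
 * thread.fsbase untouched and trusts the previously saved value.  A task
 * using a TLS or LDT selector (fsindex != 0) gets thread.fsbase zeroed
 * instead; its real base is recovered from the descriptor tables when
 * someone asks for it (see x86_fsgsbase_read_task() below).
 */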

static __always_inline void save_fsgs(struct task_struct *task)
{
	savesegment(fs, task->thread.fsindex);
	savesegment(gs, task->thread.gsindex);
	save_base_legacy(task, task->thread.fsindex, FS);
	save_base_legacy(task, task->thread.gsindex, GS);
}

#if IS_ENABLED(CONFIG_KVM)
/*
 * While a process is running, current->thread.fsbase and
 * current->thread.gsbase may not match the corresponding CPU registers (see
 * save_base_legacy()).  KVM wants an efficient way to save and restore
 * FSBASE and GSBASE.
 * When FSGSBASE extensions are enabled, this will have to use RD{FS,GS}BASE.
 */
void save_fsgs_for_kvm(void)
{
	save_fsgs(current);
}
EXPORT_SYMBOL_GPL(save_fsgs_for_kvm);
#endif

static __always_inline void loadseg(enum which_selector which,
				    unsigned short sel)
{
	if (which == FS)
		loadsegment(fs, sel);
	else
		load_gs_index(sel);
}

static __always_inline void load_seg_legacy(unsigned short prev_index,
					    unsigned long prev_base,
					    unsigned short next_index,
					    unsigned long next_base,
					    enum which_selector which)
{
	if (likely(next_index <= 3)) {
		/*
		 * The next task is using 64-bit TLS, is not using this
		 * segment at all, or is having fun with arcane CPU features.
		 */
		if (next_base == 0) {
			/*
			 * Nasty case: on AMD CPUs, we need to forcibly zero
			 * the base.
			 */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				loadseg(which, __USER_DS);
				loadseg(which, next_index);
			} else {
				/*
				 * We could try to exhaustively detect cases
				 * under which we can skip the segment load,
				 * but there's really only one case that matters
				 * for performance: if both the previous and
				 * next states are fully zeroed, we can skip
				 * the load.
				 *
				 * (This assumes that prev_base == 0 has no
				 * false positives.  This is the case on
				 * Intel-style CPUs.)
				 */
				if (likely(prev_index | next_index | prev_base))
					loadseg(which, next_index);
			}
		} else {
			if (prev_index != next_index)
				loadseg(which, next_index);
			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
			       next_base);
		}
	} else {
		/*
		 * The next task is using a real segment.  Loading the selector
		 * is sufficient.
		 */
		loadseg(which, next_index);
	}
}
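
/*
 * Worked example for load_seg_legacy(): switching between two 64-bit tasks
 * that never touch FS (index 0, base 0 on both sides) reaches the innermost
 * branch with prev_index | next_index | prev_base == 0, so neither a segment
 * load nor an MSR write is issued.  If the incoming task has a nonzero base,
 * the wrmsrl() path restores it; if only the outgoing task did, reloading
 * the null selector is what clears the stale base (with the extra __USER_DS
 * dance on X86_BUG_NULL_SEG parts).
 */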

static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
					      struct thread_struct *next)
{
	load_seg_legacy(prev->fsindex, prev->fsbase,
			next->fsindex, next->fsbase, FS);
	load_seg_legacy(prev->gsindex, prev->gsbase,
			next->gsindex, next->gsbase, GS);
}

static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
					    unsigned short selector)
{
	unsigned short idx = selector >> 3;
	unsigned long base;

	if (likely((selector & SEGMENT_TI_MASK) == 0)) {
		if (unlikely(idx >= GDT_ENTRIES))
			return 0;

		/*
		 * There are no user segments in the GDT with nonzero bases
		 * other than the TLS segments.
		 */
		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
			return 0;

		idx -= GDT_ENTRY_TLS_MIN;
		base = get_desc_base(&task->thread.tls_array[idx]);
	} else {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		struct ldt_struct *ldt;

		/*
		 * If performance here mattered, we could protect the LDT
		 * with RCU.  This is a slow path, though, so we can just
		 * take the mutex.
		 */
		mutex_lock(&task->mm->context.lock);
		ldt = task->mm->context.ldt;
		if (unlikely(idx >= ldt->nr_entries))
			base = 0;
		else
			base = get_desc_base(ldt->entries + idx);
		mutex_unlock(&task->mm->context.lock);
#else
		base = 0;
#endif
	}

	return base;
}
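
/*
 * Note on the decoding above: a selector is (index << 3) | TI | RPL, so
 * "selector >> 3" recovers the descriptor index and SEGMENT_TI_MASK (bit 2)
 * distinguishes GDT from LDT.  For example, a user selector of
 * ((GDT_ENTRY_TLS_MIN << 3) | 3) picks the first TLS slot out of
 * task->thread.tls_array, while any selector with the TI bit set is looked
 * up in the task's LDT under the mm context lock.
 */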

unsigned long x86_fsbase_read_task(struct task_struct *task)
{
	unsigned long fsbase;

	if (task == current)
		fsbase = x86_fsbase_read_cpu();
	else if (task->thread.fsindex == 0)
		fsbase = task->thread.fsbase;
	else
		fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);

	return fsbase;
}

unsigned long x86_gsbase_read_task(struct task_struct *task)
{
	unsigned long gsbase;

	if (task == current)
		gsbase = x86_gsbase_read_cpu_inactive();
	else if (task->thread.gsindex == 0)
		gsbase = task->thread.gsbase;
	else
		gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);

	return gsbase;
}

void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
{
	WARN_ON_ONCE(task == current);

	task->thread.fsbase = fsbase;
}

void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
{
	WARN_ON_ONCE(task == current);

	task->thread.gsbase = gsbase;
}
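
/*
 * The *_write_task() helpers above only update the saved thread state, which
 * is why they warn when called on the current task: the live registers would
 * be left out of sync.  The current-task case is handled in
 * do_arch_prctl_64() below, which also reloads the selector and the CPU's
 * base.  Remote readers and writers (e.g. the ptrace register-access paths)
 * are expected to go through these helpers.
 */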

int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	int err;
	struct pt_regs *childregs;
	struct fork_frame *fork_frame;
	struct inactive_task_frame *frame;
	struct task_struct *me = current;

	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;
	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->r12 = arg;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	err = -ENOMEM;
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (in_ia32_syscall())
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)tls, 0);
		else
#endif
			err = do_arch_prctl_64(p, ARCH_SET_FS, tls);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}
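
/*
 * The inactive_task_frame built above is what ret_from_fork (entry_64.S)
 * unwinds when the new task first runs: for a PF_KTHREAD child it finds the
 * function in bx and its argument in r12 and never returns to user mode,
 * while for a normal fork/clone the copied pt_regs (with ax forced to 0, so
 * the child sees a zero return value) is what the task returns to user
 * space with.
 */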

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	WARN_ON_ONCE(regs != current_pt_regs());

	if (static_cpu_has(X86_BUG_NULL_SEG)) {
		/* Loading zero below won't clear the base. */
		loadsegment(fs, __USER_DS);
		load_gs_index(__USER_DS);
	}

	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);

	regs->ip		= new_ip;
	regs->sp		= new_sp;
	regs->cs		= _cs;
	regs->ss		= _ss;
	regs->flags		= X86_EFLAGS_IF;
	force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
EXPORT_SYMBOL_GPL(start_thread);

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();

	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
		     this_cpu_read(irq_count) != -1);

	switch_fpu_prepare(prev_fpu, cpu);

	/*
	 * We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	save_fsgs(prev_p);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.  This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must
	 * be done before fpu__restore(), so the TS bit is up to
	 * date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT.  The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	x86_fsgsbase_load(prev, next);

	switch_fpu_finish(next_fpu, cpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);
	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));

	/* Reload sp0. */
	update_task_stack(next_p);

	switch_to_extra(prev_p, next_p);

#ifdef CONFIG_XEN_PV
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL.  We need to switch it manually.
	 */
	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor.  As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths.  Instead, we ensure that SS is never NULL in
		 * system call context.  We do this by replacing NULL SS
		 * selectors at every context switch.  SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt.  Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes.  Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	/* Load the Intel cache allocation PQR MSR. */
	resctrl_sched_in();

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);
	/* Pretend that this comes from a 64bit execve */
	task_pt_regs(current)->orig_ax = __NR_execve;
	current_thread_info()->status &= ~TS_COMPAT;

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/*
	 * TBD: overwrites user setup. Should have two bits.
	 * But 64bit processes have always behaved this way,
	 * so it's not too bad. The main problem is just that
	 * 32bit children are affected again.
	 */
	current->personality &= ~READ_IMPLIES_EXEC;
}

static void __set_personality_x32(void)
{
#ifdef CONFIG_X86_X32
	clear_thread_flag(TIF_IA32);
	set_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_X32;
	current->personality &= ~READ_IMPLIES_EXEC;
	/*
	 * in_32bit_syscall() uses the presence of the x32 syscall bit
	 * flag to determine compat status.  The x86 mmap() code relies on
	 * the syscall bitness, so set the x32 syscall bit right here to make
	 * in_32bit_syscall() work during exec().
	 *
	 * Pretend to come from an x32 execve.
	 */
	task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
	current_thread_info()->status &= ~TS_COMPAT;
#endif
}

static void __set_personality_ia32(void)
{
#ifdef CONFIG_IA32_EMULATION
	set_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_IA32;
	current->personality |= force_personality32;
	/* Prepare the first "return" to user space */
	task_pt_regs(current)->orig_ax = __NR_ia32_execve;
	current_thread_info()->status |= TS_COMPAT;
#endif
}

void set_personality_ia32(bool x32)
{
	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	if (x32)
		__set_personality_x32();
	else
		__set_personality_ia32();
}
EXPORT_SYMBOL_GPL(set_personality_ia32);
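
/*
 * Both flavours of personality setup are normally reached from the ELF
 * loader through the SET_PERSONALITY*() hooks at exec time, which is why
 * they also rewrite orig_ax to look like the matching execve variant before
 * the first return to user space.
 */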

#ifdef CONFIG_CHECKPOINT_RESTORE
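/*
 * Helper for the ARCH_MAP_VDSO_* options in do_arch_prctl_64() below: a
 * checkpoint/restore tool can ask for a specific vdso flavour to be mapped
 * at a chosen address and, on success, gets the image size back so it knows
 * how much address space the mapping occupies.
 */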
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret;

	ret = map_vdso_once(image, addr);
	if (ret)
		return ret;

	return (long)image->size;
}
#endif

long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
{
	int ret = 0;

	switch (option) {
	case ARCH_SET_GS: {
		if (unlikely(arg2 >= TASK_SIZE_MAX))
			return -EPERM;

		preempt_disable();
		/*
		 * ARCH_SET_GS has always overwritten the index
		 * and the base. Zero is the most sensible value
		 * to put in the index, and is the only value that
		 * makes any sense if FSGSBASE is unavailable.
		 */
		if (task == current) {
			loadseg(GS, 0);
			x86_gsbase_write_cpu_inactive(arg2);

			/*
			 * On non-FSGSBASE systems, save_base_legacy() expects
			 * that we also fill in thread.gsbase.
			 */
			task->thread.gsbase = arg2;

		} else {
			task->thread.gsindex = 0;
			x86_gsbase_write_task(task, arg2);
		}
		preempt_enable();
		break;
	}
	case ARCH_SET_FS: {
		/*
		 * Not strictly needed for %fs, but do it for symmetry
		 * with %gs
		 */
		if (unlikely(arg2 >= TASK_SIZE_MAX))
			return -EPERM;

		preempt_disable();
		/*
		 * Set the selector to 0 for the same reason
		 * as %gs above.
		 */
		if (task == current) {
			loadseg(FS, 0);
			x86_fsbase_write_cpu(arg2);

			/*
			 * On non-FSGSBASE systems, save_base_legacy() expects
			 * that we also fill in thread.fsbase.
			 */
			task->thread.fsbase = arg2;
		} else {
			task->thread.fsindex = 0;
			x86_fsbase_write_task(task, arg2);
		}
		preempt_enable();
		break;
	}
	case ARCH_GET_FS: {
		unsigned long base = x86_fsbase_read_task(task);

		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base = x86_gsbase_read_task(task);

		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, arg2);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, arg2);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, arg2);
#endif

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	long ret;

	ret = do_arch_prctl_64(current, option, arg2);
	if (ret == -EINVAL)
		ret = do_arch_prctl_common(current, option, arg2);

	return ret;
}
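
/*
 * Illustrative only (not part of this file): from 64-bit user space the
 * syscall above is typically driven directly via syscall(2), with the
 * ARCH_* option values coming from <asm/prctl.h>.  A hypothetical caller:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned long base;
 *
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, &base);		 // read FS base
 *	syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)&base); // point GS at user memory
 *
 * ARCH_SET_FS/ARCH_SET_GS reject kernel addresses (arg2 >= TASK_SIZE_MAX)
 * with -EPERM, as enforced in do_arch_prctl_64() above.
 */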

#ifdef CONFIG_IA32_EMULATION
COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	return do_arch_prctl_common(current, option, arg2);
}
#endif
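/*
 * Report the task's user-mode stack pointer from its saved pt_regs.  This is
 * consumed by generic code (the /proc/<pid>/stat "kstkesp" field, for
 * example) rather than by the switch paths above.
 */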
unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}