xref: /openbmc/linux/arch/x86/mm/pti.c (revision ccb01374)
1 /*
2  * Copyright(c) 2017 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * This code is based in part on work published here:
14  *
15  *	https://github.com/IAIK/KAISER
16  *
17  * The original work was written and signed off for the Linux
18  * kernel by:
19  *
20  *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
21  *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
22  *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
23  *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
24  *
25  * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
26  * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
27  *		       Andy Lutomirski <luto@amacapital.net>
28  */
29 #include <linux/kernel.h>
30 #include <linux/errno.h>
31 #include <linux/string.h>
32 #include <linux/types.h>
33 #include <linux/bug.h>
34 #include <linux/init.h>
35 #include <linux/spinlock.h>
36 #include <linux/mm.h>
37 #include <linux/uaccess.h>
38 
39 #include <asm/cpufeature.h>
40 #include <asm/hypervisor.h>
41 #include <asm/vsyscall.h>
42 #include <asm/cmdline.h>
43 #include <asm/pti.h>
44 #include <asm/pgtable.h>
45 #include <asm/pgalloc.h>
46 #include <asm/tlbflush.h>
47 #include <asm/desc.h>
48 #include <asm/sections.h>
49 
50 #undef pr_fmt
51 #define pr_fmt(fmt)     "Kernel/User page tables isolation: " fmt
52 
53 /* Backporting helper */
54 #ifndef __GFP_NOTRACK
55 #define __GFP_NOTRACK	0
56 #endif
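
/*
 * __GFP_NOTRACK was tied to kmemcheck and has been removed from current
 * kernels; defining it to 0 keeps the gfp masks below compiling whether
 * or not the flag exists.
 */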
57 
58 /*
59  * Define the page-table levels we clone for user-space on 32
60  * and 64 bit.
61  */
62 #ifdef CONFIG_X86_64
63 #define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PMD
64 #else
65 #define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PTE
66 #endif
67 
68 static void __init pti_print_if_insecure(const char *reason)
69 {
70 	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
71 		pr_info("%s\n", reason);
72 }
73 
74 static void __init pti_print_if_secure(const char *reason)
75 {
76 	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
77 		pr_info("%s\n", reason);
78 }
79 
80 enum pti_mode {
81 	PTI_AUTO = 0,
82 	PTI_FORCE_OFF,
83 	PTI_FORCE_ON
84 } pti_mode;
85 
86 void __init pti_check_boottime_disable(void)
87 {
88 	char arg[5];
89 	int ret;
90 
91 	/* Assume mode is auto unless overridden. */
92 	pti_mode = PTI_AUTO;
93 
94 	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
95 		pti_mode = PTI_FORCE_OFF;
96 		pti_print_if_insecure("disabled on XEN PV.");
97 		return;
98 	}
99 
100 	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
101 	if (ret > 0)  {
102 		if (ret == 3 && !strncmp(arg, "off", 3)) {
103 			pti_mode = PTI_FORCE_OFF;
104 			pti_print_if_insecure("disabled on command line.");
105 			return;
106 		}
107 		if (ret == 2 && !strncmp(arg, "on", 2)) {
108 			pti_mode = PTI_FORCE_ON;
109 			pti_print_if_secure("force enabled on command line.");
110 			goto enable;
111 		}
112 		if (ret == 4 && !strncmp(arg, "auto", 4)) {
113 			pti_mode = PTI_AUTO;
114 			goto autosel;
115 		}
116 	}
117 
118 	if (cmdline_find_option_bool(boot_command_line, "nopti")) {
119 		pti_mode = PTI_FORCE_OFF;
120 		pti_print_if_insecure("disabled on command line.");
121 		return;
122 	}
123 
124 autosel:
125 	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
126 		return;
127 enable:
128 	setup_force_cpu_cap(X86_FEATURE_PTI);
129 }
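
/*
 * A quick illustration of the command lines handled above (not meant to
 * be an exhaustive kernel-parameters reference):
 *
 *   pti=off   - force PTI off, even on CPUs with X86_BUG_CPU_MELTDOWN
 *   pti=on    - force PTI on, even on CPUs without the bug
 *   pti=auto  - explicitly ask for the default selection
 *   nopti     - same effect as pti=off; pti= takes precedence if both
 *               are given
 *
 * With no option at all, PTI is enabled only when the CPU is marked with
 * X86_BUG_CPU_MELTDOWN, and it is always disabled when running as a
 * Xen PV guest.
 */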
130 
131 pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
132 {
133 	/*
134 	 * Changes to the high (kernel) portion of the kernelmode page
135 	 * tables are not automatically propagated to the usermode tables.
136 	 *
137 	 * Users should keep in mind that, unlike the kernelmode tables,
138 	 * there is no vmalloc_fault equivalent for the usermode tables.
139 	 * Top-level entries added to init_mm's usermode pgd after boot
140 	 * will not be automatically propagated to other mms.
141 	 */
142 	if (!pgdp_maps_userspace(pgdp))
143 		return pgd;
144 
145 	/*
146 	 * The user page tables get the full PGD, accessible from
147 	 * userspace:
148 	 */
149 	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;
150 
151 	/*
152 	 * If this is normal user memory, make it NX in the kernel
153 	 * pagetables so that, if we somehow screw up and return to
154 	 * usermode with the kernel CR3 loaded, we'll get a page fault
155 	 * instead of allowing user code to execute with the wrong CR3.
156 	 *
157 	 * As exceptions, we don't set NX if:
158 	 *  - _PAGE_USER is not set.  This could be an executable
159 	 *     EFI runtime mapping or something similar, and the kernel
160 	 *     may execute from it
161 	 *  - we don't have NX support
162 	 *  - we're clearing the PGD (i.e. the new pgd is not present).
163 	 */
164 	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
165 	    (__supported_pte_mask & _PAGE_NX))
166 		pgd.pgd |= _PAGE_NX;
167 
168 	/* return the copy of the PGD we want the kernel to use: */
169 	return pgd;
170 }
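
/*
 * A rough sketch of the layout kernel_to_user_pgdp() relies on (the
 * authoritative definitions live in asm/pgtable.h and the entry code):
 * with PTI, each PGD is an order-1 (8kB) allocation.  The kernelmode
 * PGD occupies the lower 4kB and the usermode copy the upper 4kB, so
 * both the helper above and the entry-code CR3 switch can move between
 * the two copies by flipping a single page-sized address bit instead of
 * walking any data structure.
 */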
171 
172 /*
173  * Walk the user copy of the page tables (optionally) trying to allocate
174  * page table pages on the way down.
175  *
176  * Returns a pointer to a P4D on success, or NULL on failure.
177  */
178 static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
179 {
180 	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
181 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
182 
183 	if (address < PAGE_OFFSET) {
184 		WARN_ONCE(1, "attempt to walk user address\n");
185 		return NULL;
186 	}
187 
188 	if (pgd_none(*pgd)) {
189 		unsigned long new_p4d_page = __get_free_page(gfp);
190 		if (WARN_ON_ONCE(!new_p4d_page))
191 			return NULL;
192 
193 		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
194 	}
195 	BUILD_BUG_ON(pgd_large(*pgd) != 0);
196 
197 	return p4d_offset(pgd, address);
198 }
199 
200 /*
201  * Walk the user copy of the page tables (optionally) trying to allocate
202  * page table pages on the way down.
203  *
204  * Returns a pointer to a PMD on success, or NULL on failure.
205  */
206 static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
207 {
208 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
209 	p4d_t *p4d;
210 	pud_t *pud;
211 
212 	p4d = pti_user_pagetable_walk_p4d(address);
213 	if (!p4d)
214 		return NULL;
215 
216 	BUILD_BUG_ON(p4d_large(*p4d) != 0);
217 	if (p4d_none(*p4d)) {
218 		unsigned long new_pud_page = __get_free_page(gfp);
219 		if (WARN_ON_ONCE(!new_pud_page))
220 			return NULL;
221 
222 		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
223 	}
224 
225 	pud = pud_offset(p4d, address);
226 	/* The user page tables do not use large mappings: */
227 	if (pud_large(*pud)) {
228 		WARN_ON(1);
229 		return NULL;
230 	}
231 	if (pud_none(*pud)) {
232 		unsigned long new_pmd_page = __get_free_page(gfp);
233 		if (WARN_ON_ONCE(!new_pmd_page))
234 			return NULL;
235 
236 		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
237 	}
238 
239 	return pmd_offset(pud, address);
240 }
241 
242 /*
243  * Walk the shadow copy of the page tables (optionally) trying to allocate
244  * page table pages on the way down.  Does not support large pages.
245  *
246  * Note: this is only used when mapping *new* kernel data into the
247  * user/shadow page tables.  It is never used for userspace data.
248  *
249  * Returns a pointer to a PTE on success, or NULL on failure.
250  */
251 static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
252 {
253 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
254 	pmd_t *pmd;
255 	pte_t *pte;
256 
257 	pmd = pti_user_pagetable_walk_pmd(address);
258 	if (!pmd)
259 		return NULL;
260 
261 	/* We can't do anything sensible if we hit a large mapping. */
262 	if (pmd_large(*pmd)) {
263 		WARN_ON(1);
264 		return NULL;
265 	}
266 
267 	if (pmd_none(*pmd)) {
268 		unsigned long new_pte_page = __get_free_page(gfp);
269 		if (!new_pte_page)
270 			return NULL;
271 
272 		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
273 	}
274 
275 	pte = pte_offset_kernel(pmd, address);
276 	if (pte_flags(*pte) & _PAGE_USER) {
277 		WARN_ONCE(1, "attempt to walk to user pte\n");
278 		return NULL;
279 	}
280 	return pte;
281 }
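
/*
 * The three walkers above form a simple chain: the p4d walker resolves
 * (and if necessary allocates) the top level of the user copy, the pmd
 * walker builds on it, and the pte walker builds on that.  The pages
 * allocated here back fixed kernel/entry mappings in the user page
 * tables and are expected to stay around for the lifetime of the
 * system; there is no corresponding free path in this file.
 */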
282 
283 #ifdef CONFIG_X86_VSYSCALL_EMULATION
284 static void __init pti_setup_vsyscall(void)
285 {
286 	pte_t *pte, *target_pte;
287 	unsigned int level;
288 
289 	pte = lookup_address(VSYSCALL_ADDR, &level);
290 	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
291 		return;
292 
293 	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
294 	if (WARN_ON(!target_pte))
295 		return;
296 
297 	*target_pte = *pte;
298 	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
299 }
300 #else
301 static void __init pti_setup_vsyscall(void) { }
302 #endif
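
/*
 * Roughly why the vsyscall clone above is needed: the legacy vsyscall
 * page sits at a fixed, userspace-visible address, so with vsyscall
 * emulation enabled its PTE must also exist in the user page-tables,
 * and set_vsyscall_pgtable_user_bits() marks the covering PGD/P4D/PUD/
 * PMD entries _PAGE_USER so the hardware walk can actually reach it.
 */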
303 
304 enum pti_clone_level {
305 	PTI_CLONE_PMD,
306 	PTI_CLONE_PTE,
307 };
308 
309 static void
310 pti_clone_pgtable(unsigned long start, unsigned long end,
311 		  enum pti_clone_level level)
312 {
313 	unsigned long addr;
314 
315 	/*
316 	 * Clone the populated PMDs which cover start to end. These PMD areas
317 	 * can have holes.
318 	 */
319 	for (addr = start; addr < end;) {
320 		pte_t *pte, *target_pte;
321 		pmd_t *pmd, *target_pmd;
322 		pgd_t *pgd;
323 		p4d_t *p4d;
324 		pud_t *pud;
325 
326 		/* Overflow check */
327 		if (addr < start)
328 			break;
329 
330 		pgd = pgd_offset_k(addr);
331 		if (WARN_ON(pgd_none(*pgd)))
332 			return;
333 		p4d = p4d_offset(pgd, addr);
334 		if (WARN_ON(p4d_none(*p4d)))
335 			return;
336 
337 		pud = pud_offset(p4d, addr);
338 		if (pud_none(*pud)) {
339 			addr += PUD_SIZE;
340 			continue;
341 		}
342 
343 		pmd = pmd_offset(pud, addr);
344 		if (pmd_none(*pmd)) {
345 			addr += PMD_SIZE;
346 			continue;
347 		}
348 
349 		if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
350 			target_pmd = pti_user_pagetable_walk_pmd(addr);
351 			if (WARN_ON(!target_pmd))
352 				return;
353 
354 			/*
355 			 * Only clone present PMDs.  This ensures we only set
356 			 * _PAGE_GLOBAL on present PMDs.  This should only be
357 			 * called on well-known addresses anyway, so a non-
358 			 * present PMD would be a surprise.
359 			 */
360 			if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
361 				return;
362 
363 			/*
364 			 * Setting 'target_pmd' below creates a mapping in both
365 			 * the user and kernel page tables.  It is effectively
366 			 * global, so set it as global in both copies.  Note:
367 			 * the X86_FEATURE_PGE check is not _required_ because
368 			 * the CPU ignores _PAGE_GLOBAL when PGE is not
369 			 * supported.  The check keeps consistency with
370 			 * code that only sets this bit when supported.
371 			 */
372 			if (boot_cpu_has(X86_FEATURE_PGE))
373 				*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);
374 
375 			/*
376 			 * Copy the PMD.  That is, the kernelmode and usermode
377 			 * tables will share the last-level page tables of this
378 			 * address range.
379 			 */
380 			*target_pmd = *pmd;
381 
382 			addr += PMD_SIZE;
383 
384 		} else if (level == PTI_CLONE_PTE) {
385 
386 			/* Walk the page-table down to the pte level */
387 			pte = pte_offset_kernel(pmd, addr);
388 			if (pte_none(*pte)) {
389 				addr += PAGE_SIZE;
390 				continue;
391 			}
392 
393 			/* Only clone present PTEs */
394 			if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
395 				return;
396 
397 			/* Allocate PTE in the user page-table */
398 			target_pte = pti_user_pagetable_walk_pte(addr);
399 			if (WARN_ON(!target_pte))
400 				return;
401 
402 			/* Set GLOBAL bit in both PTEs */
403 			if (boot_cpu_has(X86_FEATURE_PGE))
404 				*pte = pte_set_flags(*pte, _PAGE_GLOBAL);
405 
406 			/* Clone the PTE */
407 			*target_pte = *pte;
408 
409 			addr += PAGE_SIZE;
410 
411 		} else {
412 			BUG();
413 		}
414 	}
415 }
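
/*
 * A sketch of how the callers below use this: entry/irqentry text is
 * cloned at PMD granularity, the optional kernel-image clone uses
 * PTI_LEVEL_KERNEL_IMAGE (PMD on 64-bit, PTE on 32-bit), and the
 * 32-bit variant of pti_clone_user_shared() clones the cpu_entry_area
 * at PMD granularity as well.
 */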
416 
417 #ifdef CONFIG_X86_64
418 /*
419  * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
420  * next-level entry on 5-level systems).
421  */
422 static void __init pti_clone_p4d(unsigned long addr)
423 {
424 	p4d_t *kernel_p4d, *user_p4d;
425 	pgd_t *kernel_pgd;
426 
427 	user_p4d = pti_user_pagetable_walk_p4d(addr);
428 	if (!user_p4d)
429 		return;
430 
431 	kernel_pgd = pgd_offset_k(addr);
432 	kernel_p4d = p4d_offset(kernel_pgd, addr);
433 	*user_p4d = *kernel_p4d;
434 }
435 
436 /*
437  * Clone the CPU_ENTRY_AREA and associated data into the user space visible
438  * page table.
439  */
440 static void __init pti_clone_user_shared(void)
441 {
442 	unsigned int cpu;
443 
444 	pti_clone_p4d(CPU_ENTRY_AREA_BASE);
445 
446 	for_each_possible_cpu(cpu) {
447 		/*
448 		 * The SYSCALL64 entry code needs to be able to find the
449 		 * thread stack and needs one word of scratch space in which
450 		 * to spill a register.  All of this lives in the TSS, in
451 		 * the sp1 and sp2 slots.
452 		 *
453 		 * This is done for all possible CPUs during boot to ensure
454 		 * that it's propagated to all mms.  If we were to add one of
455 		 * these mappings during CPU hotplug, we would need to take
456 		 * some measure to make sure that every mm that subsequently
457 		 * ran on that CPU would have the relevant PGD entry in its
458 		 * pagetables.  The usual vmalloc_fault() mechanism would not
459 		 * work for page faults taken in entry_SYSCALL_64 before RSP
460 		 * is set up.
461 		 */
462 
463 		unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
464 		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
465 		pte_t *target_pte;
466 
467 		target_pte = pti_user_pagetable_walk_pte(va);
468 		if (WARN_ON(!target_pte))
469 			return;
470 
471 		*target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
472 	}
473 }
474 
475 #else /* CONFIG_X86_64 */
476 
477 /*
478  * On 32 bit PAE systems with 1GB of kernel address space there is only
479  * one pgd/p4d for the whole kernel. Cloning that would map the whole
480  * address space into the user page-tables, making PTI useless. So clone
481  * the page-table on the PMD level to prevent that.
482  */
483 static void __init pti_clone_user_shared(void)
484 {
485 	unsigned long start, end;
486 
487 	start = CPU_ENTRY_AREA_BASE;
488 	end   = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
489 
490 	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
491 }
492 #endif /* CONFIG_X86_64 */
493 
494 /*
495  * Clone the ESPFIX P4D into the user space visible page table
496  */
497 static void __init pti_setup_espfix64(void)
498 {
499 #ifdef CONFIG_X86_ESPFIX64
500 	pti_clone_p4d(ESPFIX_BASE_ADDR);
501 #endif
502 }
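
/*
 * Roughly why espfix64 needs this: the IRET for 16-bit stack segments
 * pops its frame from the espfix stack after the switch to the user
 * CR3 has already happened, so the ESPFIX area must be visible in the
 * user page-tables as well.
 */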
503 
504 /*
505  * Clone the populated PMDs of the entry and irqentry text and force it RO.
506  */
507 static void pti_clone_entry_text(void)
508 {
509 	pti_clone_pgtable((unsigned long) __entry_text_start,
510 			  (unsigned long) __irqentry_text_end,
511 			  PTI_CLONE_PMD);
512 }
513 
514 /*
515  * Global pages and PCIDs are both ways to make kernel TLB entries
516  * live longer, reduce TLB misses and improve kernel performance.
517  * But, leaving all kernel text Global makes it potentially accessible
518  * to Meltdown-style attacks which make it trivial to find gadgets or
519  * defeat KASLR.
520  *
521  * Only use global pages when it is really worth it.
522  */
523 static inline bool pti_kernel_image_global_ok(void)
524 {
525 	/*
526 	 * Systems with PCIDs get little benefit from global
527 	 * kernel text and are not worth the downsides.
528 	 */
529 	if (cpu_feature_enabled(X86_FEATURE_PCID))
530 		return false;
531 
532 	/*
533 	 * Only do global kernel image for pti=auto.  Do the most
534 	 * secure thing (not global) if pti=on is specified.
535 	 */
536 	if (pti_mode != PTI_AUTO)
537 		return false;
538 
539 	/*
540 	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
541 	 * global kernel image pages.  Do the safe thing (disable
542 	 * global kernel image).  This is unlikely to ever be
543 	 * noticed because PTI is disabled by default on AMD CPUs.
544 	 */
545 	if (boot_cpu_has(X86_FEATURE_K8))
546 		return false;
547 
548 	/*
549 	 * RANDSTRUCT derives its hardening benefits from the
550 	 * attacker's lack of knowledge about the layout of kernel
551 	 * data structures.  Keep the kernel image non-global in
552 	 * cases where RANDSTRUCT is in use to help keep the layout a
553 	 * secret.
554 	 */
555 	if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
556 		return false;
557 
558 	return true;
559 }
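
/*
 * Summing up the checks above: the kernel image is made global in the
 * user-visible page-tables only when PTI was auto-selected, the CPU
 * has no PCID support, it is not an old AMD K8 and RANDSTRUCT is not
 * enabled.
 */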
560 
561 /*
562  * This is the only user for these and it is not arch-generic
563  * like the other set_memory.h functions.  Just extern them.
564  */
565 extern int set_memory_nonglobal(unsigned long addr, int numpages);
566 extern int set_memory_global(unsigned long addr, int numpages);
567 
568 /*
569  * For some configurations, map all of kernel text into the user page
570  * tables.  This reduces TLB misses, especially on non-PCID systems.
571  */
572 static void pti_clone_kernel_text(void)
573 {
574 	/*
575 	 * rodata is part of the kernel image and is normally
576 	 * readable on the filesystem or on the web.  But, do not
577 	 * clone the areas past rodata, they might contain secrets.
578 	 */
579 	unsigned long start = PFN_ALIGN(_text);
580 	unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
581 	unsigned long end_global = PFN_ALIGN((unsigned long)__stop___ex_table);
582 
583 	if (!pti_kernel_image_global_ok())
584 		return;
585 
586 	pr_debug("mapping partial kernel image into user address space\n");
587 
588 	/*
589 	 * Note that this will undo _some_ of the work that
590 	 * pti_set_kernel_image_nonglobal() did to clear the
591 	 * global bit.
592 	 */
593 	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);
594 
595 	/*
596 	 * pti_clone_pgtable() will set the global bit in any PMDs
597 	 * that it clones, but we also need to get any PTEs in
598 	 * the last level for areas that are not huge-page-aligned.
599 	 */
600 
601 	/* Set the global bit for normal non-__init kernel text: */
602 	set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
603 }
604 
605 void pti_set_kernel_image_nonglobal(void)
606 {
607 	/*
608 	 * The identity map is created with PMDs, regardless of the
609 	 * actual length of the kernel.  We need to clear
610 	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
611 	 * of the image.
612 	 */
613 	unsigned long start = PFN_ALIGN(_text);
614 	unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);
615 
616 	/*
617 	 * This clears _PAGE_GLOBAL from the entire kernel image.
618 	 * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
619 	 * areas that are mapped to userspace.
620 	 */
621 	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
622 }
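
/*
 * Worked example for the rounding above, with hypothetical addresses
 * and assuming x86-64 (PMD_PAGE_SIZE == 2MB): if _text is at
 * 0xffffffff81000000 and _end is at 0xffffffff826f1000, then
 * _PAGE_GLOBAL is cleared on every 4k page from 0xffffffff81000000 up
 * to the next PMD boundary at 0xffffffff82800000, not just up to _end.
 */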
623 
624 /*
625  * Initialize kernel page table isolation
626  */
627 void __init pti_init(void)
628 {
629 	if (!static_cpu_has(X86_FEATURE_PTI))
630 		return;
631 
632 	pr_info("enabled\n");
633 
634 #ifdef CONFIG_X86_32
635 	/*
636 	 * We would like to check X86_FEATURE_PCID here, but the init code
637 	 * clears that feature flag on 32 bit because PCID is not supported
638 	 * there anyway.  To print this warning we therefore have to query
639 	 * CPUID directly.
640 	 */
641 	if (cpuid_ecx(0x1) & BIT(17)) {
642 		/* Use printk to work around pr_fmt() */
643 		printk(KERN_WARNING "\n");
644 		printk(KERN_WARNING "************************************************************\n");
645 		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
646 		printk(KERN_WARNING "**                                                        **\n");
647 		printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
648 		printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
649 		printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
650 		printk(KERN_WARNING "**                                                        **\n");
651 		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
652 		printk(KERN_WARNING "************************************************************\n");
653 	}
654 #endif
655 
656 	pti_clone_user_shared();
657 
658 	/* Undo all global bits from the init pagetables in head_64.S: */
659 	pti_set_kernel_image_nonglobal();
660 	/* Replace some of the global bits just for shared entry text: */
661 	pti_clone_entry_text();
662 	pti_setup_espfix64();
663 	pti_setup_vsyscall();
664 }
665 
666 /*
667  * Finalize the kernel mappings in the userspace page-table. Some of the
668  * mappings for the kernel image might have changed since pti_init()
669  * cloned them. This is because parts of the kernel image have been
670  * mapped RO and/or NX.  These changes need to be cloned again to the
671  * userspace page-table.
672  */
673 void pti_finalize(void)
674 {
675 	/*
676 	 * We need to clone everything (again) that maps parts of the
677 	 * kernel image.
678 	 */
679 	pti_clone_entry_text();
680 	pti_clone_kernel_text();
681 
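	/*
	 * Check the usermode page-tables for insecure W+X mappings; this
	 * is a no-op unless CONFIG_DEBUG_WX is enabled.
	 */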
682 	debug_checkwx_user();
683 }
684