xref: /openbmc/linux/arch/x86/mm/init.c (revision e5a81929)
1 #include <linux/gfp.h>
2 #include <linux/initrd.h>
3 #include <linux/ioport.h>
4 #include <linux/swap.h>
5 #include <linux/memblock.h>
6 #include <linux/swapfile.h>
7 #include <linux/swapops.h>
8 #include <linux/kmemleak.h>
9 #include <linux/sched/task.h>
10 
11 #include <asm/set_memory.h>
12 #include <asm/cpu_device_id.h>
13 #include <asm/e820/api.h>
14 #include <asm/init.h>
15 #include <asm/page.h>
16 #include <asm/page_types.h>
17 #include <asm/sections.h>
18 #include <asm/setup.h>
19 #include <asm/tlbflush.h>
20 #include <asm/tlb.h>
21 #include <asm/proto.h>
22 #include <asm/dma.h>		/* for MAX_DMA_PFN */
23 #include <asm/kaslr.h>
24 #include <asm/hypervisor.h>
25 #include <asm/cpufeature.h>
26 #include <asm/pti.h>
27 #include <asm/text-patching.h>
28 #include <asm/memtype.h>
29 #include <asm/paravirt.h>
30 
31 /*
32  * We need to define the tracepoints somewhere, and tlb.c
33  * is only compiled when SMP=y.
34  */
35 #include <trace/events/tlb.h>
36 
37 #include "mm_internal.h"
38 
39 /*
40  * Tables translating between page_cache_type_t and pte encoding.
41  *
42  * The default values are defined statically as the minimal supported mode;
43  * WC and WT fall back to UC-.  pat_init() updates these values to support
44  * more cache modes (WC and WT) when it is safe to do so.  See pat_init()
45  * for the details.  Note that __early_ioremap(), used during early boot,
46  * takes a pgprot_t (pte encoding) and does not use these tables.
47  *
48  *   The index into __cachemode2pte_tbl[] is the cache mode.
49  *
50  *   The index into __pte2cachemode_tbl[] is formed from the caching attribute
51  *   bits of the pte (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at bit positions 0, 1, 2.
52  */
53 static uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
54 	[_PAGE_CACHE_MODE_WB      ]	= 0         | 0        ,
55 	[_PAGE_CACHE_MODE_WC      ]	= 0         | _PAGE_PCD,
56 	[_PAGE_CACHE_MODE_UC_MINUS]	= 0         | _PAGE_PCD,
57 	[_PAGE_CACHE_MODE_UC      ]	= _PAGE_PWT | _PAGE_PCD,
58 	[_PAGE_CACHE_MODE_WT      ]	= 0         | _PAGE_PCD,
59 	[_PAGE_CACHE_MODE_WP      ]	= 0         | _PAGE_PCD,
60 };
61 
62 unsigned long cachemode2protval(enum page_cache_mode pcm)
63 {
64 	if (likely(pcm == 0))
65 		return 0;
66 	return __cachemode2pte_tbl[pcm];
67 }
68 EXPORT_SYMBOL(cachemode2protval);
69 
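/*
 * Reverse table: the index is formed from the PWT/PCD/PAT pte bits via
 * __pte2cm_idx(), the value is the resulting cache mode.
 */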
70 static uint8_t __pte2cachemode_tbl[8] = {
71 	[__pte2cm_idx( 0        | 0         | 0        )] = _PAGE_CACHE_MODE_WB,
72 	[__pte2cm_idx(_PAGE_PWT | 0         | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
73 	[__pte2cm_idx( 0        | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
74 	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC,
75 	[__pte2cm_idx( 0        | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
76 	[__pte2cm_idx(_PAGE_PWT | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
77 	[__pte2cm_idx(0         | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
78 	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
79 };
80 
81 /*
82  * Check whether the write-protect PAT entry really selects write-protect.
83  * To do this without making assumptions about how PAT has been set up (Xen
84  * uses a different layout than the kernel), translate the _PAGE_CACHE_MODE_WP
85  * cache mode via __cachemode2pte_tbl[] into protection bits (those protection
86  * bits will select a cache mode of WP or better), and then translate the
87  * protection bits back into the cache mode using __pte2cm_idx() and the
88  * __pte2cachemode_tbl[] array.  This yields the cache mode actually in use.
89  */
90 bool x86_has_pat_wp(void)
91 {
92 	uint16_t prot = __cachemode2pte_tbl[_PAGE_CACHE_MODE_WP];
93 
94 	return __pte2cachemode_tbl[__pte2cm_idx(prot)] == _PAGE_CACHE_MODE_WP;
95 }
96 
97 enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
98 {
99 	unsigned long masked;
100 
101 	masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
102 	if (likely(masked == 0))
103 		return 0;
104 	return __pte2cachemode_tbl[__pte2cm_idx(masked)];
105 }
106 
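/*
 * Early page-table allocation buffer, reserved from the brk area by
 * early_alloc_pgt_buf(): [pgt_buf_start, pgt_buf_top) in pfns, with
 * pgt_buf_end pointing at the next unused page.
 */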
107 static unsigned long __initdata pgt_buf_start;
108 static unsigned long __initdata pgt_buf_end;
109 static unsigned long __initdata pgt_buf_top;
110 
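/*
 * Lowest pfn of the already direct-mapped region; the memblock fallback in
 * alloc_low_pages() allocates above it.
 */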
111 static unsigned long min_pfn_mapped;
112 
113 static bool __initdata can_use_brk_pgt = true;
114 
115 /*
116  * Pages returned are already directly mapped.
117  *
118  * Changing that is likely to break Xen, see commit:
119  *
120  *    279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve
121  *
122  * for detailed information.
123  */
124 __ref void *alloc_low_pages(unsigned int num)
125 {
126 	unsigned long pfn;
127 	int i;
128 
129 	if (after_bootmem) {
130 		unsigned int order;
131 
132 		order = get_order((unsigned long)num << PAGE_SHIFT);
133 		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
134 	}
135 
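	/*
	 * Use the brk buffer if it still has room and may be used; otherwise
	 * fall back to memblock within the already-mapped range, or extend
	 * the brk area as a last resort.
	 */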
136 	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
137 		unsigned long ret = 0;
138 
139 		if (min_pfn_mapped < max_pfn_mapped) {
140 			ret = memblock_phys_alloc_range(
141 					PAGE_SIZE * num, PAGE_SIZE,
142 					min_pfn_mapped << PAGE_SHIFT,
143 					max_pfn_mapped << PAGE_SHIFT);
144 		}
145 		if (!ret && can_use_brk_pgt)
146 			ret = __pa(extend_brk(PAGE_SIZE * num, PAGE_SIZE));
147 
148 		if (!ret)
149 			panic("alloc_low_pages: can not alloc memory");
150 
151 		pfn = ret >> PAGE_SHIFT;
152 	} else {
153 		pfn = pgt_buf_end;
154 		pgt_buf_end += num;
155 	}
156 
157 	for (i = 0; i < num; i++) {
158 		void *adr;
159 
160 		adr = __va((pfn + i) << PAGE_SHIFT);
161 		clear_page(adr);
162 	}
163 
164 	return __va(pfn << PAGE_SHIFT);
165 }
166 
167 /*
168  * By default we need to be able to allocate page tables below the PGD, first
169  * for the 0-ISA_END_ADDRESS range and second for the initial PMD_SIZE mapping.
170  * With KASLR memory randomization enabled, depending on the machine's e820
171  * memory map and the PUD alignment, twice that many pages may be needed to
172  * cover the randomized layout.
173  */
174 
175 #ifndef CONFIG_X86_5LEVEL
176 #define INIT_PGD_PAGE_TABLES    3
177 #else
178 #define INIT_PGD_PAGE_TABLES    4
179 #endif
180 
181 #ifndef CONFIG_RANDOMIZE_MEMORY
182 #define INIT_PGD_PAGE_COUNT      (2 * INIT_PGD_PAGE_TABLES)
183 #else
184 #define INIT_PGD_PAGE_COUNT      (4 * INIT_PGD_PAGE_TABLES)
185 #endif
186 
187 #define INIT_PGT_BUF_SIZE	(INIT_PGD_PAGE_COUNT * PAGE_SIZE)
188 RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
189 void __init early_alloc_pgt_buf(void)
190 {
191 	unsigned long tables = INIT_PGT_BUF_SIZE;
192 	phys_addr_t base;
193 
194 	base = __pa(extend_brk(tables, PAGE_SIZE));
195 
196 	pgt_buf_start = base >> PAGE_SHIFT;
197 	pgt_buf_end = pgt_buf_start;
198 	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
199 }
200 
201 int after_bootmem;
202 
203 early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES);
204 
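/* A physical address range and the page sizes (2M/1G) allowed to map it. */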
205 struct map_range {
206 	unsigned long start;
207 	unsigned long end;
208 	unsigned page_size_mask;
209 };
210 
211 static int page_size_mask;
212 
213 /*
214  * Save some of the cr4 feature bits we're using (e.g. Pentium 4MB
215  * enable and PPro Global page enable), so that any CPUs that boot
216  * up after us can get the correct flags. Invoked on the boot CPU.
217  */
218 static inline void cr4_set_bits_and_update_boot(unsigned long mask)
219 {
220 	mmu_cr4_features |= mask;
221 	if (trampoline_cr4_features)
222 		*trampoline_cr4_features = mmu_cr4_features;
223 	cr4_set_bits(mask);
224 }
225 
226 static void __init probe_page_size_mask(void)
227 {
228 	/*
229 	 * For pagealloc debugging, identity mapping will use small pages.
230 	 * This will simplify cpa(), which otherwise needs to support splitting
231 	 * large pages into small ones in interrupt context, etc.
232 	 */
233 	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
234 		page_size_mask |= 1 << PG_LEVEL_2M;
235 	else
236 		direct_gbpages = 0;
237 
238 	/* Enable PSE if available */
239 	if (boot_cpu_has(X86_FEATURE_PSE))
240 		cr4_set_bits_and_update_boot(X86_CR4_PSE);
241 
242 	/* Enable PGE if available */
243 	__supported_pte_mask &= ~_PAGE_GLOBAL;
244 	if (boot_cpu_has(X86_FEATURE_PGE)) {
245 		cr4_set_bits_and_update_boot(X86_CR4_PGE);
246 		__supported_pte_mask |= _PAGE_GLOBAL;
247 	}
248 
249 	/* By default everything is supported: */
250 	__default_kernel_pte_mask = __supported_pte_mask;
251 	/* Except with PTI, where the kernel is mostly non-Global: */
252 	if (cpu_feature_enabled(X86_FEATURE_PTI))
253 		__default_kernel_pte_mask &= ~_PAGE_GLOBAL;
254 
255 	/* Enable 1 GB linear kernel mappings if available: */
256 	if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
257 		printk(KERN_INFO "Using GB pages for direct mapping\n");
258 		page_size_mask |= 1 << PG_LEVEL_1G;
259 	} else {
260 		direct_gbpages = 0;
261 	}
262 }
263 
264 #define INTEL_MATCH(_model) { .vendor  = X86_VENDOR_INTEL,	\
265 			      .family  = 6,			\
266 			      .model = _model,			\
267 			    }
268 /*
269  * INVLPG may not properly flush Global entries
270  * on these CPUs when PCIDs are enabled.
271  */
272 static const struct x86_cpu_id invlpg_miss_ids[] = {
273 	INTEL_MATCH(INTEL_FAM6_ALDERLAKE   ),
274 	INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ),
275 	INTEL_MATCH(INTEL_FAM6_ALDERLAKE_N ),
276 	INTEL_MATCH(INTEL_FAM6_RAPTORLAKE  ),
277 	INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P),
278 	INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S),
279 	{}
280 };
281 
282 static void setup_pcid(void)
283 {
284 	if (!IS_ENABLED(CONFIG_X86_64))
285 		return;
286 
287 	if (!boot_cpu_has(X86_FEATURE_PCID))
288 		return;
289 
290 	if (x86_match_cpu(invlpg_miss_ids)) {
291 		pr_info("Incomplete global flushes, disabling PCID\n");
292 		setup_clear_cpu_cap(X86_FEATURE_PCID);
293 		return;
294 	}
295 
296 	if (boot_cpu_has(X86_FEATURE_PGE)) {
297 		/*
298 		 * This can't be cr4_set_bits_and_update_boot() -- the
299 		 * trampoline code can't handle CR4.PCIDE and it wouldn't
300 		 * do any good anyway.  Despite the name,
301 		 * cr4_set_bits_and_update_boot() doesn't actually cause
302 		 * the bits in question to remain set all the way through
303 		 * the secondary boot asm.
304 		 *
305 		 * Instead, we brute-force it and set CR4.PCIDE manually in
306 		 * start_secondary().
307 		 */
308 		cr4_set_bits(X86_CR4_PCIDE);
309 
310 		/*
311 		 * INVPCID's single-context modes (2/3) only work if we set
312 		 * X86_CR4_PCIDE, *and* we have INVPCID support.  It's unusable
313 		 * on systems that have X86_CR4_PCIDE clear, or that have
314 		 * no INVPCID support at all.
315 		 */
316 		if (boot_cpu_has(X86_FEATURE_INVPCID))
317 			setup_force_cpu_cap(X86_FEATURE_INVPCID_SINGLE);
318 	} else {
319 		/*
320 		 * flush_tlb_all(), as currently implemented, won't work if
321 		 * PCID is on but PGE is not.  Since that combination
322 		 * doesn't exist on real hardware, there's no reason to try
323 		 * to fully support it, but it's polite to avoid corrupting
324 		 * data if we're on an improperly configured VM.
325 		 */
326 		setup_clear_cpu_cap(X86_FEATURE_PCID);
327 	}
328 }
329 
330 #ifdef CONFIG_X86_32
331 #define NR_RANGE_MR 3
332 #else /* CONFIG_X86_64 */
333 #define NR_RANGE_MR 5
334 #endif
335 
336 static int __meminit save_mr(struct map_range *mr, int nr_range,
337 			     unsigned long start_pfn, unsigned long end_pfn,
338 			     unsigned long page_size_mask)
339 {
340 	if (start_pfn < end_pfn) {
341 		if (nr_range >= NR_RANGE_MR)
342 			panic("run out of range for init_memory_mapping\n");
343 		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
344 		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
345 		mr[nr_range].page_size_mask = page_size_mask;
346 		nr_range++;
347 	}
348 
349 	return nr_range;
350 }
351 
352 /*
353  * Adjust the page_size_mask so that a small range is mapped with a big
354  * page size instead of small ones when the surrounding area is RAM too.
355  */
356 static void __ref adjust_range_page_size_mask(struct map_range *mr,
357 							 int nr_range)
358 {
359 	int i;
360 
361 	for (i = 0; i < nr_range; i++) {
362 		if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
363 		    !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
364 			unsigned long start = round_down(mr[i].start, PMD_SIZE);
365 			unsigned long end = round_up(mr[i].end, PMD_SIZE);
366 
367 #ifdef CONFIG_X86_32
368 			if ((end >> PAGE_SHIFT) > max_low_pfn)
369 				continue;
370 #endif
371 
372 			if (memblock_is_region_memory(start, end - start))
373 				mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
374 		}
375 		if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
376 		    !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
377 			unsigned long start = round_down(mr[i].start, PUD_SIZE);
378 			unsigned long end = round_up(mr[i].end, PUD_SIZE);
379 
380 			if (memblock_is_region_memory(start, end - start))
381 				mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
382 		}
383 	}
384 }
385 
386 static const char *page_size_string(struct map_range *mr)
387 {
388 	static const char str_1g[] = "1G";
389 	static const char str_2m[] = "2M";
390 	static const char str_4m[] = "4M";
391 	static const char str_4k[] = "4k";
392 
393 	if (mr->page_size_mask & (1<<PG_LEVEL_1G))
394 		return str_1g;
395 	/*
396 	 * 32-bit without PAE has a 4M large page size.
397 	 * PG_LEVEL_2M is misnamed, but we can at least
398 	 * print out the right size in the string.
399 	 */
400 	if (IS_ENABLED(CONFIG_X86_32) &&
401 	    !IS_ENABLED(CONFIG_X86_PAE) &&
402 	    mr->page_size_mask & (1<<PG_LEVEL_2M))
403 		return str_4m;
404 
405 	if (mr->page_size_mask & (1<<PG_LEVEL_2M))
406 		return str_2m;
407 
408 	return str_4k;
409 }
410 
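/*
 * Split [start, end) into head/body/tail ranges so that the aligned body can
 * be mapped with 2M (and, on 64-bit, 1G) pages while the unaligned head and
 * tail fall back to 4K pages.
 */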
411 static int __meminit split_mem_range(struct map_range *mr, int nr_range,
412 				     unsigned long start,
413 				     unsigned long end)
414 {
415 	unsigned long start_pfn, end_pfn, limit_pfn;
416 	unsigned long pfn;
417 	int i;
418 
419 	limit_pfn = PFN_DOWN(end);
420 
421 	/* Head, if the start is not big page aligned */
422 	pfn = start_pfn = PFN_DOWN(start);
423 #ifdef CONFIG_X86_32
424 	/*
425 	 * Don't use a large page for the first 2/4MB of memory
426 	 * because there are often fixed size MTRRs in there
427 	 * and MTRRs overlapping large pages can cause
428 	 * slowdowns.
429 	 */
430 	if (pfn == 0)
431 		end_pfn = PFN_DOWN(PMD_SIZE);
432 	else
433 		end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
434 #else /* CONFIG_X86_64 */
435 	end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
436 #endif
437 	if (end_pfn > limit_pfn)
438 		end_pfn = limit_pfn;
439 	if (start_pfn < end_pfn) {
440 		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
441 		pfn = end_pfn;
442 	}
443 
444 	/* big page (2M) range */
445 	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
446 #ifdef CONFIG_X86_32
447 	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
448 #else /* CONFIG_X86_64 */
449 	end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
450 	if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
451 		end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
452 #endif
453 
454 	if (start_pfn < end_pfn) {
455 		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
456 				page_size_mask & (1<<PG_LEVEL_2M));
457 		pfn = end_pfn;
458 	}
459 
460 #ifdef CONFIG_X86_64
461 	/* big page (1G) range */
462 	start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
463 	end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
464 	if (start_pfn < end_pfn) {
465 		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
466 				page_size_mask &
467 				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
468 		pfn = end_pfn;
469 	}
470 
471 	/* Tail that is not big page (1G) aligned */
472 	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
473 	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
474 	if (start_pfn < end_pfn) {
475 		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
476 				page_size_mask & (1<<PG_LEVEL_2M));
477 		pfn = end_pfn;
478 	}
479 #endif
480 
481 	/* Tail that is not big page (2M) aligned */
482 	start_pfn = pfn;
483 	end_pfn = limit_pfn;
484 	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
485 
486 	if (!after_bootmem)
487 		adjust_range_page_size_mask(mr, nr_range);
488 
489 	/* Try to merge contiguous ranges with the same page size */
490 	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
491 		unsigned long old_start;
492 		if (mr[i].end != mr[i+1].start ||
493 		    mr[i].page_size_mask != mr[i+1].page_size_mask)
494 			continue;
495 		/* move it */
496 		old_start = mr[i].start;
497 		memmove(&mr[i], &mr[i+1],
498 			(nr_range - 1 - i) * sizeof(struct map_range));
499 		mr[i--].start = old_start;
500 		nr_range--;
501 	}
502 
503 	for (i = 0; i < nr_range; i++)
504 		pr_debug(" [mem %#010lx-%#010lx] page %s\n",
505 				mr[i].start, mr[i].end - 1,
506 				page_size_string(&mr[i]));
507 
508 	return nr_range;
509 }
510 
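/* Ranges of pfns added to the direct mapping so far, kept sorted and merged. */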
511 struct range pfn_mapped[E820_MAX_ENTRIES];
512 int nr_pfn_mapped;
513 
514 static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
515 {
516 	nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_MAX_ENTRIES,
517 					     nr_pfn_mapped, start_pfn, end_pfn);
518 	nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_MAX_ENTRIES);
519 
520 	max_pfn_mapped = max(max_pfn_mapped, end_pfn);
521 
522 	if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
523 		max_low_pfn_mapped = max(max_low_pfn_mapped,
524 					 min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
525 }
526 
527 bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
528 {
529 	int i;
530 
531 	for (i = 0; i < nr_pfn_mapped; i++)
532 		if ((start_pfn >= pfn_mapped[i].start) &&
533 		    (end_pfn <= pfn_mapped[i].end))
534 			return true;
535 
536 	return false;
537 }
538 
539 /*
540  * Set up the direct mapping of the physical memory at PAGE_OFFSET.
541  * This runs before bootmem is initialized and gets pages directly from
542  * physical memory. To access them they are temporarily mapped.
543  */
544 unsigned long __ref init_memory_mapping(unsigned long start,
545 					unsigned long end, pgprot_t prot)
546 {
547 	struct map_range mr[NR_RANGE_MR];
548 	unsigned long ret = 0;
549 	int nr_range, i;
550 
551 	pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n",
552 	       start, end - 1);
553 
554 	memset(mr, 0, sizeof(mr));
555 	nr_range = split_mem_range(mr, 0, start, end);
556 
557 	for (i = 0; i < nr_range; i++)
558 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
559 						   mr[i].page_size_mask,
560 						   prot);
561 
562 	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);
563 
564 	return ret >> PAGE_SHIFT;
565 }
566 
567 /*
568  * We need to iterate through the E820 memory map and create direct mappings
569  * for only E820_TYPE_RAM and E820_KERN_RESERVED regions. We cannot simply
570  * create direct mappings for all pfns from [0 to max_low_pfn) and
571  * [4GB to max_pfn) because of possible memory holes in high addresses
572  * that cannot be marked as UC by fixed/variable range MTRRs.
573  * Depending on the alignment of E820 ranges, this may result in using
574  * smaller (i.e. 4K instead of 2M or 1G) pages for parts of the mapping.
575  *
576  * init_mem_mapping() calls init_range_memory_mapping() with a big range.
577  * That range may have holes in the middle or at the ends, and only the RAM
578  * parts will be mapped by init_range_memory_mapping().
579  */
580 static unsigned long __init init_range_memory_mapping(
581 					   unsigned long r_start,
582 					   unsigned long r_end)
583 {
584 	unsigned long start_pfn, end_pfn;
585 	unsigned long mapped_ram_size = 0;
586 	int i;
587 
588 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
589 		u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
590 		u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);
591 		if (start >= end)
592 			continue;
593 
594 		/*
595 		 * If the range overlaps the brk pgt buffer, the page table
596 		 * pages must be allocated from memblock instead.
597 		 */
598 		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
599 				    min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
600 		init_memory_mapping(start, end, PAGE_KERNEL);
601 		mapped_ram_size += end - start;
602 		can_use_brk_pgt = true;
603 	}
604 
605 	return mapped_ram_size;
606 }
607 
608 static unsigned long __init get_new_step_size(unsigned long step_size)
609 {
610 	/*
611 	 * Initial mapped size is PMD_SIZE (2M).
612 	 * We cannot set step_size to PUD_SIZE (1G) yet.
613 	 * In the worst case, when we cross a 1G boundary and
614 	 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8K)
615 	 * to map a 1G range with PTEs. Hence we use one less than the
616 	 * difference of the page table level shifts.
617 	 *
618 	 * No need to worry about overflow in the top-down case: on 32-bit,
619 	 * when step_size is 0, round_down() returns 0 for start, and that
620 	 * turns it into 0x100000000ULL.
621 	 * In the bottom-up case, round_up(x, 0) returns 0 too, which
622 	 * needs to be taken into account by the code below.
623 	 */
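	/* With 4K pages and 2M PMDs this grows step_size by a factor of 256. */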
624 	return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
625 }
626 
627 /**
628  * memory_map_top_down - Map [map_start, map_end) top down
629  * @map_start: start address of the target memory range
630  * @map_end: end address of the target memory range
631  *
632  * This function will set up the direct mapping for the memory range
633  * [map_start, map_end) top-down. This means the page tables
634  * will be allocated at the end of the memory, and we map the
635  * memory top-down.
636  */
637 static void __init memory_map_top_down(unsigned long map_start,
638 				       unsigned long map_end)
639 {
640 	unsigned long real_end, last_start;
641 	unsigned long step_size;
642 	unsigned long addr;
643 	unsigned long mapped_ram_size = 0;
644 
645 	/*
646 	 * Systems that have many reserved areas near the top of memory,
647 	 * e.g. QEMU with less than 1G RAM and EFI enabled, or Xen, will
648 	 * require lots of 4K mappings which may exhaust pgt_buf.
649 	 * Start with top-most PMD_SIZE range aligned at PMD_SIZE to ensure
650 	 * there is enough mapped memory that can be allocated from
651 	 * memblock.
652 	 */
653 	addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start,
654 					 map_end);
655 	memblock_phys_free(addr, PMD_SIZE);
656 	real_end = addr + PMD_SIZE;
657 
658 	/* step_size needs to be small so the pgt_buf from BRK can cover it */
659 	step_size = PMD_SIZE;
660 	max_pfn_mapped = 0; /* will get exact value next */
661 	min_pfn_mapped = real_end >> PAGE_SHIFT;
662 	last_start = real_end;
663 
664 	/*
665 	 * We start from the top (end of memory) and go to the bottom.
666 	 * The memblock allocator gives us a block of RAM from the
667 	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) to be used as new
668 	 * pages for page tables.
669 	 */
670 	while (last_start > map_start) {
671 		unsigned long start;
672 
673 		if (last_start > step_size) {
674 			start = round_down(last_start - 1, step_size);
675 			if (start < map_start)
676 				start = map_start;
677 		} else
678 			start = map_start;
679 		mapped_ram_size += init_range_memory_mapping(start,
680 							last_start);
681 		last_start = start;
682 		min_pfn_mapped = last_start >> PAGE_SHIFT;
683 		if (mapped_ram_size >= step_size)
684 			step_size = get_new_step_size(step_size);
685 	}
686 
687 	if (real_end < map_end)
688 		init_range_memory_mapping(real_end, map_end);
689 }
690 
691 /**
692  * memory_map_bottom_up - Map [map_start, map_end) bottom up
693  * @map_start: start address of the target memory range
694  * @map_end: end address of the target memory range
695  *
696  * This function will set up the direct mapping for the memory range
697  * [map_start, map_end) bottom-up. Since we have limited the
698  * bottom-up allocation to above the kernel, the page tables will
699  * be allocated just above the kernel and we map the memory
700  * in [map_start, map_end) bottom-up.
701  */
702 static void __init memory_map_bottom_up(unsigned long map_start,
703 					unsigned long map_end)
704 {
705 	unsigned long next, start;
706 	unsigned long mapped_ram_size = 0;
707 	/* step_size needs to be small so the pgt_buf from BRK can cover it */
708 	unsigned long step_size = PMD_SIZE;
709 
710 	start = map_start;
711 	min_pfn_mapped = start >> PAGE_SHIFT;
712 
713 	/*
714 	 * We start from the bottom (@map_start) and go to the top (@map_end).
715 	 * The memblock allocator gives us a block of RAM from the
716 	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) to be used as new
717 	 * pages for page tables.
718 	 */
719 	while (start < map_end) {
720 		if (step_size && map_end - start > step_size) {
721 			next = round_up(start + 1, step_size);
722 			if (next > map_end)
723 				next = map_end;
724 		} else {
725 			next = map_end;
726 		}
727 
728 		mapped_ram_size += init_range_memory_mapping(start, next);
729 		start = next;
730 
731 		if (mapped_ram_size >= step_size)
732 			step_size = get_new_step_size(step_size);
733 	}
734 }
735 
736 /*
737  * The real mode trampoline, which is required for bootstrapping CPUs,
738  * occupies only a small area in the low 1MB.  See reserve_real_mode()
739  * for details.
740  *
741  * If KASLR is disabled the first PGD entry of the direct mapping is copied
742  * to map the real mode trampoline.
743  *
744  * If KASLR is enabled, copy only the PUD which covers the low 1MB
745  * area. This limits the randomization granularity to 1GB for both 4-level
746  * and 5-level paging.
747  */
748 static void __init init_trampoline(void)
749 {
750 #ifdef CONFIG_X86_64
751 	/*
752 	 * The code below will alias kernel page-tables in the user-range of the
753 	 * address space, including the Global bit. So global TLB entries will
754 	 * be created when using the trampoline page-table.
755 	 */
756 	if (!kaslr_memory_enabled())
757 		trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
758 	else
759 		init_trampoline_kaslr();
760 #endif
761 }
762 
763 void __init init_mem_mapping(void)
764 {
765 	unsigned long end;
766 
767 	pti_check_boottime_disable();
768 	probe_page_size_mask();
769 	setup_pcid();
770 
771 #ifdef CONFIG_X86_64
772 	end = max_pfn << PAGE_SHIFT;
773 #else
774 	end = max_low_pfn << PAGE_SHIFT;
775 #endif
776 
777 	/* the ISA range is always mapped regardless of memory holes */
778 	init_memory_mapping(0, ISA_END_ADDRESS, PAGE_KERNEL);
779 
780 	/* Init the trampoline, possibly with KASLR memory offset */
781 	init_trampoline();
782 
783 	/*
784 	 * If the allocation is in bottom-up direction, we setup direct mapping
785 	 * in bottom-up, otherwise we setup direct mapping in top-down.
786 	 */
787 	if (memblock_bottom_up()) {
788 		unsigned long kernel_end = __pa_symbol(_end);
789 
790 		/*
791 		 * We need two separate calls here because we want to
792 		 * allocate page tables above the kernel. So we first map
793 		 * [kernel_end, end) so that memory above the kernel is mapped
794 		 * as soon as possible, and then use page tables allocated above
795 		 * the kernel to map [ISA_END_ADDRESS, kernel_end).
796 		 */
797 		memory_map_bottom_up(kernel_end, end);
798 		memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
799 	} else {
800 		memory_map_top_down(ISA_END_ADDRESS, end);
801 	}
802 
803 #ifdef CONFIG_X86_64
804 	if (max_pfn > max_low_pfn) {
805 		/* can we preserve max_low_pfn? */
806 		max_low_pfn = max_pfn;
807 	}
808 #else
809 	early_ioremap_page_table_range_init();
810 #endif
811 
812 	load_cr3(swapper_pg_dir);
813 	__flush_tlb_all();
814 
815 	x86_init.hyper.init_mem_mapping();
816 
817 	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
818 }
819 
820 /*
821  * Initialize an mm_struct to be used during text poking and the address to
822  * be used during patching.
823  */
824 void __init poking_init(void)
825 {
826 	spinlock_t *ptl;
827 	pte_t *ptep;
828 
829 	poking_mm = mm_alloc();
830 	BUG_ON(!poking_mm);
831 
832 	/* Xen PV guests need the PGD to be pinned. */
833 	paravirt_enter_mmap(poking_mm);
834 
835 	/*
836 	 * Randomize the poking address, but make sure that the following page
837 	 * is covered by the same PMD. We need 2 pages, so find space for 3,
838 	 * and adjust the address if the PMD ends right after the first page.
839 	 */
840 	poking_addr = TASK_UNMAPPED_BASE;
841 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
842 		poking_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) %
843 			(TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE);
844 
845 	if (((poking_addr + PAGE_SIZE) & ~PMD_MASK) == 0)
846 		poking_addr += PAGE_SIZE;
847 
848 	/*
849 	 * We need to trigger the allocation of the page-tables that will be
850 	 * needed for poking now. Later, poking may be performed in an atomic
851 	 * section, which might cause allocation to fail.
852 	 */
853 	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
854 	BUG_ON(!ptep);
855 	pte_unmap_unlock(ptep, ptl);
856 }
857 
858 /*
859  * devmem_is_allowed() checks to see if /dev/mem access to a certain address
860  * is valid. The argument is a physical page number.
861  *
862  * On x86, access has to be given to the first megabyte of RAM because that
863  * area traditionally contains BIOS code and data regions used by X, dosemu,
864  * and similar apps. Since they map the entire memory range, the whole range
865  * must be allowed (for mapping), but any areas that would otherwise be
866  * disallowed are flagged as being "zero filled" instead of rejected.
867  * Access has to be given to non-kernel-RAM areas as well; these contain
868  * PCI MMIO resources as well as potential BIOS/ACPI data regions.
869  */
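/* Returns 0 (deny), 1 (allow) or 2 (allow, but show the page as all zeros). */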
870 int devmem_is_allowed(unsigned long pagenr)
871 {
872 	if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
873 				IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
874 			!= REGION_DISJOINT) {
875 		/*
876 		 * For disallowed memory regions in the low 1MB range,
877 		 * request that the page be shown as all zeros.
878 		 */
879 		if (pagenr < 256)
880 			return 2;
881 
882 		return 0;
883 	}
884 
885 	/*
886 	 * This must follow RAM test, since System RAM is considered a
887 	 * restricted resource under CONFIG_STRICT_DEVMEM.
888 	 */
889 	if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
890 		/* Low 1MB bypasses iomem restrictions. */
891 		if (pagenr < 256)
892 			return 1;
893 
894 		return 0;
895 	}
896 
897 	return 1;
898 }
899 
900 void free_init_pages(const char *what, unsigned long begin, unsigned long end)
901 {
902 	unsigned long begin_aligned, end_aligned;
903 
904 	/* Make sure boundaries are page aligned */
905 	begin_aligned = PAGE_ALIGN(begin);
906 	end_aligned   = end & PAGE_MASK;
907 
908 	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
909 		begin = begin_aligned;
910 		end   = end_aligned;
911 	}
912 
913 	if (begin >= end)
914 		return;
915 
916 	/*
917 	 * If debugging page accesses, do not free this memory but
918 	 * mark it not present - any buggy init-section access will
919 	 * create a kernel page fault:
920 	 */
921 	if (debug_pagealloc_enabled()) {
922 		pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
923 			begin, end - 1);
924 		/*
925 		 * Inform kmemleak about the hole in the memory since the
926 		 * corresponding pages will be unmapped.
927 		 */
928 		kmemleak_free_part((void *)begin, end - begin);
929 		set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
930 	} else {
931 		/*
932 		 * We just marked the kernel text read-only above; now that
933 		 * we are going to free part of it, we need to make it
934 		 * writable and non-executable first.
935 		 */
936 		set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
937 		set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
938 
939 		free_reserved_area((void *)begin, (void *)end,
940 				   POISON_FREE_INITMEM, what);
941 	}
942 }
943 
944 /*
945  * begin/end can be in the direct map or the "high kernel mapping"
946  * used for the kernel image only.  free_init_pages() will do the
947  * right thing for either kind of address.
948  */
949 void free_kernel_image_pages(const char *what, void *begin, void *end)
950 {
951 	unsigned long begin_ul = (unsigned long)begin;
952 	unsigned long end_ul = (unsigned long)end;
953 	unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT;
954 
955 	free_init_pages(what, begin_ul, end_ul);
956 
957 	/*
958 	 * PTI maps some of the kernel into userspace.  For performance,
959 	 * this includes some kernel areas that do not contain secrets.
960 	 * Those areas might be adjacent to the parts of the kernel image
961 	 * being freed, which may contain secrets.  Remove the "high kernel
962 	 * image mapping" for these freed areas, ensuring they are not even
963 	 * potentially vulnerable to Meltdown regardless of the specific
964 	 * optimizations PTI is currently using.
965 	 *
966 	 * The "noalias" prevents unmapping the direct map alias which is
967 	 * needed to access the freed pages.
968 	 *
969 	 * This is only valid for 64bit kernels. 32bit has only one mapping
970 	 * which can't be treated in this way for obvious reasons.
971 	 */
972 	if (IS_ENABLED(CONFIG_X86_64) && cpu_feature_enabled(X86_FEATURE_PTI))
973 		set_memory_np_noalias(begin_ul, len_pages);
974 }
975 
976 void __ref free_initmem(void)
977 {
978 	e820__reallocate_tables();
979 
980 	mem_encrypt_free_decrypted_mem();
981 
982 	free_kernel_image_pages("unused kernel image (initmem)",
983 				&__init_begin, &__init_end);
984 }
985 
986 #ifdef CONFIG_BLK_DEV_INITRD
987 void __init free_initrd_mem(unsigned long start, unsigned long end)
988 {
989 	/*
990 	 * end may not be page aligned, and we cannot align it here because the
991 	 * decompressor could be confused by an aligned initrd_end.
992 	 * We already reserved the partial end page earlier in
993 	 *   - i386_start_kernel()
994 	 *   - x86_64_start_kernel()
995 	 *   - relocate_initrd()
996 	 * So here we can safely use PAGE_ALIGN() to also free the partial end page.
997 	 */
998 	free_init_pages("initrd", start, PAGE_ALIGN(end));
999 }
1000 #endif
1001 
1002 /*
1003  * Calculate the precise size of the DMA zone (first 16 MB of RAM),
1004  * and pass it to the MM layer - to help it set zone watermarks more
1005  * accurately.
1006  *
1007  * Done on 64-bit systems only for the time being, although 32-bit systems
1008  * might benefit from this as well.
1009  */
1010 void __init memblock_find_dma_reserve(void)
1011 {
1012 #ifdef CONFIG_X86_64
1013 	u64 nr_pages = 0, nr_free_pages = 0;
1014 	unsigned long start_pfn, end_pfn;
1015 	phys_addr_t start_addr, end_addr;
1016 	int i;
1017 	u64 u;
1018 
1019 	/*
1020 	 * Iterate over all memory ranges (free and reserved ones alike),
1021 	 * to calculate the total number of pages in the first 16 MB of RAM:
1022 	 */
1023 	nr_pages = 0;
1024 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
1025 		start_pfn = min(start_pfn, MAX_DMA_PFN);
1026 		end_pfn   = min(end_pfn,   MAX_DMA_PFN);
1027 
1028 		nr_pages += end_pfn - start_pfn;
1029 	}
1030 
1031 	/*
1032 	 * Iterate over free memory ranges to calculate the number of free
1033 	 * pages in the DMA zone, while not counting potential partial
1034 	 * pages at the beginning or the end of the range:
1035 	 */
1036 	nr_free_pages = 0;
1037 	for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) {
1038 		start_pfn = min_t(unsigned long, PFN_UP(start_addr), MAX_DMA_PFN);
1039 		end_pfn   = min_t(unsigned long, PFN_DOWN(end_addr), MAX_DMA_PFN);
1040 
1041 		if (start_pfn < end_pfn)
1042 			nr_free_pages += end_pfn - start_pfn;
1043 	}
1044 
1045 	set_dma_reserve(nr_pages - nr_free_pages);
1046 #endif
1047 }
1048 
1049 void __init zone_sizes_init(void)
1050 {
1051 	unsigned long max_zone_pfns[MAX_NR_ZONES];
1052 
1053 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
1054 
1055 #ifdef CONFIG_ZONE_DMA
1056 	max_zone_pfns[ZONE_DMA]		= min(MAX_DMA_PFN, max_low_pfn);
1057 #endif
1058 #ifdef CONFIG_ZONE_DMA32
1059 	max_zone_pfns[ZONE_DMA32]	= min(MAX_DMA32_PFN, max_low_pfn);
1060 #endif
1061 	max_zone_pfns[ZONE_NORMAL]	= max_low_pfn;
1062 #ifdef CONFIG_HIGHMEM
1063 	max_zone_pfns[ZONE_HIGHMEM]	= max_pfn;
1064 #endif
1065 
1066 	free_area_init(max_zone_pfns);
1067 }
1068 
1069 __visible DEFINE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate) = {
1070 	.loaded_mm = &init_mm,
1071 	.next_asid = 1,
1072 	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
1073 };
1074 
1075 #ifdef CONFIG_ADDRESS_MASKING
1076 DEFINE_PER_CPU(u64, tlbstate_untag_mask);
1077 EXPORT_PER_CPU_SYMBOL(tlbstate_untag_mask);
1078 #endif
1079 
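/*
 * Used by the PAT initialization code to install the final cache mode <->
 * pte encoding once PAT has been set up.
 */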
1080 void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
1081 {
1082 	/* entry 0 MUST be WB (hardwired to speed up translations) */
1083 	BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB);
1084 
1085 	__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
1086 	__pte2cachemode_tbl[entry] = cache;
1087 }
1088 
1089 #ifdef CONFIG_SWAP
1090 unsigned long arch_max_swapfile_size(void)
1091 {
1092 	unsigned long pages;
1093 
1094 	pages = generic_max_swapfile_size();
1095 
1096 	if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
1097 		/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
1098 		unsigned long long l1tf_limit = l1tf_pfn_limit();
1099 		/*
1100 		 * Swap offsets are also encoded with 3 bits below those used for
1101 		 * the pfn, which makes the usable limit higher.
1102 		 */
1103 #if CONFIG_PGTABLE_LEVELS > 2
1104 		l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
1105 #endif
1106 		pages = min_t(unsigned long long, l1tf_limit, pages);
1107 	}
1108 	return pages;
1109 }
1110 #endif
1111