#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/swapfile.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <linux/sched/task.h>

#include <asm/set_memory.h>
#include <asm/cpu_device_id.h>
#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>
#include <asm/dma.h>		/* for MAX_DMA_PFN */
#include <asm/kaslr.h>
#include <asm/hypervisor.h>
#include <asm/cpufeature.h>
#include <asm/pti.h>
#include <asm/text-patching.h>
#include <asm/memtype.h>
#include <asm/paravirt.h>

/*
 * We need to define the tracepoints somewhere, and tlb.c
 * is only compiled when SMP=y.
 */
#include <trace/events/tlb.h>

#include "mm_internal.h"

/*
 * Tables translating between page_cache_type_t and pte encoding.
 *
 * The default values are defined statically as minimal supported mode;
 * WC and WT fall back to UC-. pat_init() updates these values to support
 * more cache modes (WC and WT) when it is safe to do so. See pat_init()
 * for the details. Note that __early_ioremap(), used during early boot,
 * takes pgprot_t (pte encoding) and does not use these tables.
 *
 * The index into __cachemode2pte_tbl[] is the cache mode.
 *
 * The index into __pte2cachemode_tbl[] is formed from the caching attribute
 * bits of the pte (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at bit positions 0, 1, 2.
 */
static uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
	[_PAGE_CACHE_MODE_WB      ]	= 0         | 0        ,
	[_PAGE_CACHE_MODE_WC      ]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC_MINUS]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC      ]	= _PAGE_PWT | _PAGE_PCD,
	[_PAGE_CACHE_MODE_WT      ]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_WP      ]	= 0         | _PAGE_PCD,
};

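/* Translate a cache mode into the pte protection bits that select it. */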
unsigned long cachemode2protval(enum page_cache_mode pcm)
{
	if (likely(pcm == 0))
		return 0;
	return __cachemode2pte_tbl[pcm];
}
EXPORT_SYMBOL(cachemode2protval);

static uint8_t __pte2cachemode_tbl[8] = {
	[__pte2cm_idx( 0        | 0         | 0        )] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT | 0         | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx( 0        | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC,
	[__pte2cm_idx( 0        | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx( 0        | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
};
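/*
 * Worked example for the default tables above: _PAGE_CACHE_MODE_WP encodes
 * to _PAGE_PCD, which decodes back to _PAGE_CACHE_MODE_UC_MINUS, so
 * x86_has_pat_wp() below reports false until pat_init() updates the tables.
 */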

/*
 * Check that the write-protect PAT entry is set for write-protect.
 * To do this without making assumptions about how PAT has been set up (Xen
 * has another layout than the kernel), translate the _PAGE_CACHE_MODE_WP
 * cache mode via __cachemode2pte_tbl[] into protection bits (those protection
 * bits will select a cache mode of WP or better), and then translate the
 * protection bits back into the cache mode using __pte2cm_idx() and the
 * __pte2cachemode_tbl[] array. This returns the cache mode that is really
 * in use.
 */
bool x86_has_pat_wp(void)
{
	uint16_t prot = __cachemode2pte_tbl[_PAGE_CACHE_MODE_WP];

	return __pte2cachemode_tbl[__pte2cm_idx(prot)] == _PAGE_CACHE_MODE_WP;
}

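/* Translate pte protection bits back into the cache mode they select. */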
enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
{
	unsigned long masked;

	masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
	if (likely(masked == 0))
		return 0;
	return __pte2cachemode_tbl[__pte2cm_idx(masked)];
}

static unsigned long __initdata pgt_buf_start;
static unsigned long __initdata pgt_buf_end;
static unsigned long __initdata pgt_buf_top;

static unsigned long min_pfn_mapped;

static bool __initdata can_use_brk_pgt = true;

/*
 * Pages returned are already directly mapped.
 *
 * Changing that is likely to break Xen, see commit:
 *
 *    279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve
 *
 * for detailed information.
 */
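/*
 * Before bootmem is up, pages come from the early brk page-table buffer
 * when possible, otherwise from memblock; after boot they come from the
 * page allocator.
 */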
__ref void *alloc_low_pages(unsigned int num)
{
	unsigned long pfn;
	int i;

	if (after_bootmem) {
		unsigned int order;

		order = get_order((unsigned long)num << PAGE_SHIFT);
		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
	}

	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
		unsigned long ret = 0;

		if (min_pfn_mapped < max_pfn_mapped) {
			ret = memblock_phys_alloc_range(
					PAGE_SIZE * num, PAGE_SIZE,
					min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT);
		}
		if (!ret && can_use_brk_pgt)
			ret = __pa(extend_brk(PAGE_SIZE * num, PAGE_SIZE));

		if (!ret)
			panic("alloc_low_pages: can not alloc memory");

		pfn = ret >> PAGE_SHIFT;
	} else {
		pfn = pgt_buf_end;
		pgt_buf_end += num;
	}

	for (i = 0; i < num; i++) {
		void *adr;

		adr = __va((pfn + i) << PAGE_SHIFT);
		clear_page(adr);
	}

	return __va(pfn << PAGE_SHIFT);
}

/*
 * By default we need to be able to allocate page tables below the PGD,
 * first for the 0-ISA_END_ADDRESS range and then for the initial PMD_SIZE
 * mapping. With KASLR memory randomization, depending on the machine's
 * e820 memory map and the PUD alignment, twice that many pages may be
 * needed.
 */

#ifndef CONFIG_X86_5LEVEL
#define INIT_PGD_PAGE_TABLES	3
#else
#define INIT_PGD_PAGE_TABLES	4
#endif

#ifndef CONFIG_RANDOMIZE_MEMORY
#define INIT_PGD_PAGE_COUNT	(2 * INIT_PGD_PAGE_TABLES)
#else
#define INIT_PGD_PAGE_COUNT	(4 * INIT_PGD_PAGE_TABLES)
#endif

#define INIT_PGT_BUF_SIZE	(INIT_PGD_PAGE_COUNT * PAGE_SIZE)
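/* e.g. 4-level paging without KASLR randomization: 2 * 3 = 6 pages (24 KiB). */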
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
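/* Carve the early page-table buffer out of the brk area reserved above. */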
void __init early_alloc_pgt_buf(void)
{
	unsigned long tables = INIT_PGT_BUF_SIZE;
	phys_addr_t base;

	base = __pa(extend_brk(tables, PAGE_SIZE));

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
}

int after_bootmem;

early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES);

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

static int page_size_mask;

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags. Invoked on the boot CPU.
 */
static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

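/*
 * Decide which page sizes (2M/1G) may be used for the direct mapping and
 * set the CR4 and PTE feature bits (PSE, PGE, _PAGE_GLOBAL) accordingly.
 */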
static void __init probe_page_size_mask(void)
{
	/*
	 * For pagealloc debugging, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small ones in interrupt context, etc.
	 */
	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
		page_size_mask |= 1 << PG_LEVEL_2M;
	else
		direct_gbpages = 0;

	/* Enable PSE if available */
	if (boot_cpu_has(X86_FEATURE_PSE))
		cr4_set_bits_and_update_boot(X86_CR4_PSE);

	/* Enable PGE if available */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		cr4_set_bits_and_update_boot(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}

	/* By default everything is supported: */
	__default_kernel_pte_mask = __supported_pte_mask;
	/* Except with PTI, where the kernel is mostly non-Global: */
	if (cpu_feature_enabled(X86_FEATURE_PTI))
		__default_kernel_pte_mask &= ~_PAGE_GLOBAL;

	/* Enable 1 GB linear kernel mappings if available: */
	if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		printk(KERN_INFO "Using GB pages for direct mapping\n");
		page_size_mask |= 1 << PG_LEVEL_1G;
	} else {
		direct_gbpages = 0;
	}
}

/*
 * INVLPG may not properly flush Global entries
 * on these CPUs. New microcode fixes the issue.
 */
static const struct x86_cpu_id invlpg_miss_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,	    0x2e),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,	    0x42c),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT,  0x11),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,	    0x118),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,    0x4117),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,    0x2e),
	{}
};

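/*
 * Enable PCID on 64-bit CPUs that support it, unless they are affected by
 * the incomplete-global-flush erratum above and lack the fixed microcode.
 */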
static void setup_pcid(void)
{
	const struct x86_cpu_id *invlpg_miss_match;

	if (!IS_ENABLED(CONFIG_X86_64))
		return;

	if (!boot_cpu_has(X86_FEATURE_PCID))
		return;

	invlpg_miss_match = x86_match_cpu(invlpg_miss_ids);

	if (invlpg_miss_match &&
	    boot_cpu_data.microcode < invlpg_miss_match->driver_data) {
		pr_info("Incomplete global flushes, disabling PCID");
		setup_clear_cpu_cap(X86_FEATURE_PCID);
		return;
	}

	if (boot_cpu_has(X86_FEATURE_PGE)) {
		/*
		 * This can't be cr4_set_bits_and_update_boot() -- the
		 * trampoline code can't handle CR4.PCIDE and it wouldn't
		 * do any good anyway. Despite the name,
		 * cr4_set_bits_and_update_boot() doesn't actually cause
		 * the bits in question to remain set all the way through
		 * the secondary boot asm.
		 *
		 * Instead, we brute-force it and set CR4.PCIDE manually in
		 * start_secondary().
		 */
		cr4_set_bits(X86_CR4_PCIDE);
	} else {
		/*
		 * flush_tlb_all(), as currently implemented, won't work if
		 * PCID is on but PGE is not. Since that combination
		 * doesn't exist on real hardware, there's no reason to try
		 * to fully support it, but it's polite to avoid corrupting
		 * data if we're on an improperly configured VM.
		 */
		setup_clear_cpu_cap(X86_FEATURE_PCID);
	}
}

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

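/*
 * Record one [start_pfn, end_pfn) range and its page size mask in mr[],
 * panicking if mr[] would overflow.
 */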
static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

/*
 * Adjust the page_size_mask of small ranges so they use a big page size
 * instead of small pages if the surrounding big-page-aligned area is RAM
 * too.
 */
static void __ref adjust_range_page_size_mask(struct map_range *mr,
					      int nr_range)
{
	int i;

	for (i = 0; i < nr_range; i++) {
		if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
			unsigned long start = round_down(mr[i].start, PMD_SIZE);
			unsigned long end = round_up(mr[i].end, PMD_SIZE);

#ifdef CONFIG_X86_32
			if ((end >> PAGE_SHIFT) > max_low_pfn)
				continue;
#endif

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
		}
		if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
			unsigned long start = round_down(mr[i].start, PUD_SIZE);
			unsigned long end = round_up(mr[i].end, PUD_SIZE);

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
		}
	}
}

static const char *page_size_string(struct map_range *mr)
{
	static const char str_1g[] = "1G";
	static const char str_2m[] = "2M";
	static const char str_4m[] = "4M";
	static const char str_4k[] = "4k";

	if (mr->page_size_mask & (1<<PG_LEVEL_1G))
		return str_1g;
	/*
	 * 32-bit without PAE has a 4M large page size.
	 * PG_LEVEL_2M is misnamed, but we can at least
	 * print out the right size in the string.
	 */
	if (IS_ENABLED(CONFIG_X86_32) &&
	    !IS_ENABLED(CONFIG_X86_PAE) &&
	    mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_4m;

	if (mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_2m;

	return str_4k;
}

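/*
 * Split [start, end) into ranges whose alignment allows 4K, 2M or 1G
 * mappings, then merge adjacent ranges that ended up with the same page
 * size mask.
 */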
static int __meminit split_mem_range(struct map_range *mr, int nr_range,
				     unsigned long start,
				     unsigned long end)
{
	unsigned long start_pfn, end_pfn, limit_pfn;
	unsigned long pfn;
	int i;

	limit_pfn = PFN_DOWN(end);

	/* head chunk if start is not big page aligned */
	pfn = start_pfn = PFN_DOWN(start);
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pfn == 0)
		end_pfn = PFN_DOWN(PMD_SIZE);
	else
		end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#endif
	if (end_pfn > limit_pfn)
		end_pfn = limit_pfn;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pfn = end_pfn;
	}

	/* big page (2M) range */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#ifdef CONFIG_X86_32
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
		end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				   page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				   page_size_mask &
				   ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pfn = end_pfn;
	}

	/* tail that is not big page (1G) aligned */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				   page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}
#endif

	/* tail that is not big page (2M) aligned */
	start_pfn = pfn;
	end_pfn = limit_pfn;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	if (!after_bootmem)
		adjust_range_page_size_mask(mr, nr_range);

	/* try to merge contiguous ranges with the same page size */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		pr_debug(" [mem %#010lx-%#010lx] page %s\n",
			 mr[i].start, mr[i].end - 1,
			 page_size_string(&mr[i]));

	return nr_range;
}

struct range pfn_mapped[E820_MAX_ENTRIES];
int nr_pfn_mapped;

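/*
 * Record a newly created direct mapping in pfn_mapped[] and update
 * max_pfn_mapped/max_low_pfn_mapped accordingly.
 */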
static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_MAX_ENTRIES,
					     nr_pfn_mapped, start_pfn, end_pfn);
	nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_MAX_ENTRIES);

	max_pfn_mapped = max(max_pfn_mapped, end_pfn);

	if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
		max_low_pfn_mapped = max(max_low_pfn_mapped,
					 min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
}

bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	for (i = 0; i < nr_pfn_mapped; i++)
		if ((start_pfn >= pfn_mapped[i].start) &&
		    (end_pfn <= pfn_mapped[i].end))
			return true;

	return false;
}

/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __ref init_memory_mapping(unsigned long start,
					unsigned long end, pgprot_t prot)
{
	struct map_range mr[NR_RANGE_MR];
	unsigned long ret = 0;
	int nr_range, i;

	pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n",
		 start, end - 1);

	memset(mr, 0, sizeof(mr));
	nr_range = split_mem_range(mr, 0, start, end);

	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask,
						   prot);

	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);

	return ret >> PAGE_SHIFT;
}

/*
 * We need to iterate through the E820 memory map and create direct mappings
 * for only E820_TYPE_RAM and E820_KERN_RESERVED regions. We cannot simply
 * create direct mappings for all pfns from [0 to max_low_pfn) and
 * [4GB to max_pfn) because of possible memory holes in high addresses
 * that cannot be marked as UC by fixed/variable range MTRRs.
 * Depending on the alignment of E820 ranges, this may possibly result
 * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
 *
 * init_mem_mapping() calls init_range_memory_mapping() with a big range.
 * That range may have holes in the middle or at the ends, and only the RAM
 * parts will be mapped by init_range_memory_mapping().
 */
static unsigned long __init init_range_memory_mapping(
					   unsigned long r_start,
					   unsigned long r_end)
{
	unsigned long start_pfn, end_pfn;
	unsigned long mapped_ram_size = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
		u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);
		if (start >= end)
			continue;

		/*
		 * If it overlaps the brk pgt area, we need to allocate
		 * the pgt buffer from memblock instead.
		 */
		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
				  min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
		init_memory_mapping(start, end, PAGE_KERNEL);
		mapped_ram_size += end - start;
		can_use_brk_pgt = true;
	}

	return mapped_ram_size;
}

static unsigned long __init get_new_step_size(unsigned long step_size)
{
	/*
	 * Initial mapped size is PMD_SIZE (2M).
	 * We can not set step_size to be PUD_SIZE (1G) yet.
	 * In the worst case, when we cross the 1G boundary and
	 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
	 * to map a 1G range with PTEs. Hence we use one less than the
	 * difference of page table level shifts.
	 *
	 * Don't need to worry about overflow in the top-down case, on 32bit,
	 * when step_size is 0, round_down() returns 0 for start, and that
	 * turns it into 0x100000000ULL.
	 * In the bottom-up case, round_up(x, 0) returns 0 though too, which
	 * needs to be taken into consideration by the code below.
	 */
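	/* e.g. with 4K pages and 2M PMDs the shift is 8, so step_size grows by 256x: 2M -> 512M -> 128G. */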
	return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}

/**
 * memory_map_top_down - Map [map_start, map_end) top down
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will setup direct mapping for memory range
 * [map_start, map_end) in top-down. That said, the page tables
 * will be allocated at the end of the memory, and we map the
 * memory in top-down.
 */
static void __init memory_map_top_down(unsigned long map_start,
				       unsigned long map_end)
{
	unsigned long real_end, last_start;
	unsigned long step_size;
	unsigned long addr;
	unsigned long mapped_ram_size = 0;

	/*
	 * Systems that have many reserved areas near top of the memory,
	 * e.g. QEMU with less than 1G RAM and EFI enabled, or Xen, will
	 * require lots of 4K mappings which may exhaust pgt_buf.
	 * Start with top-most PMD_SIZE range aligned at PMD_SIZE to ensure
	 * there is enough mapped memory that can be allocated from
	 * memblock.
	 */
	addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start,
					 map_end);
	memblock_phys_free(addr, PMD_SIZE);
	real_end = addr + PMD_SIZE;

	/* step_size needs to be small so pgt_buf from BRK can cover it */
	step_size = PMD_SIZE;
	max_pfn_mapped = 0; /* will get exact value next */
	min_pfn_mapped = real_end >> PAGE_SHIFT;
	last_start = real_end;

	/*
	 * We start from the top (end of memory) and go to the bottom.
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page table.
	 */
	while (last_start > map_start) {
		unsigned long start;

		if (last_start > step_size) {
			start = round_down(last_start - 1, step_size);
			if (start < map_start)
				start = map_start;
		} else
			start = map_start;
		mapped_ram_size += init_range_memory_mapping(start,
							     last_start);
		last_start = start;
		min_pfn_mapped = last_start >> PAGE_SHIFT;
		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}

	if (real_end < map_end)
		init_range_memory_mapping(real_end, map_end);
}

/**
 * memory_map_bottom_up - Map [map_start, map_end) bottom up
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will setup direct mapping for memory range
 * [map_start, map_end) in bottom-up. Since we have limited the
 * bottom-up allocation above the kernel, the page tables will
 * be allocated just above the kernel and we map the memory
 * in [map_start, map_end) in bottom-up.
 */
static void __init memory_map_bottom_up(unsigned long map_start,
					unsigned long map_end)
{
	unsigned long next, start;
	unsigned long mapped_ram_size = 0;
	/* step_size needs to be small so pgt_buf from BRK can cover it */
	unsigned long step_size = PMD_SIZE;

	start = map_start;
	min_pfn_mapped = start >> PAGE_SHIFT;

	/*
	 * We start from the bottom (@map_start) and go to the top (@map_end).
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page table.
	 */
	while (start < map_end) {
		if (step_size && map_end - start > step_size) {
			next = round_up(start + 1, step_size);
			if (next > map_end)
				next = map_end;
		} else {
			next = map_end;
		}

		mapped_ram_size += init_range_memory_mapping(start, next);
		start = next;

		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}
}

/*
 * The real mode trampoline, which is required for bootstrapping CPUs,
 * occupies only a small area under the low 1MB. See reserve_real_mode()
 * for details.
 *
 * If KASLR is disabled the first PGD entry of the direct mapping is copied
 * to map the real mode trampoline.
 *
 * If KASLR is enabled, copy only the PUD which covers the low 1MB
 * area. This limits the randomization granularity to 1GB for both 4-level
 * and 5-level paging.
 */
static void __init init_trampoline(void)
{
#ifdef CONFIG_X86_64
	/*
	 * The code below will alias kernel page-tables in the user-range of the
	 * address space, including the Global bit. So global TLB entries will
	 * be created when using the trampoline page-table.
	 */
	if (!kaslr_memory_enabled())
		trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
	else
		init_trampoline_kaslr();
#endif
}

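/*
 * Top-level setup of the kernel direct mapping: probe usable page sizes,
 * set up PCID, map the ISA range and then the rest of RAM either bottom-up
 * or top-down, depending on the memblock allocation direction.
 */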
void __init init_mem_mapping(void)
{
	unsigned long end;

	pti_check_boottime_disable();
	probe_page_size_mask();
	setup_pcid();

#ifdef CONFIG_X86_64
	end = max_pfn << PAGE_SHIFT;
#else
	end = max_low_pfn << PAGE_SHIFT;
#endif

	/* the ISA range is always mapped regardless of memory holes */
	init_memory_mapping(0, ISA_END_ADDRESS, PAGE_KERNEL);

	/* Init the trampoline, possibly with KASLR memory offset */
	init_trampoline();

	/*
	 * If the allocation is in bottom-up direction, we setup direct mapping
	 * in bottom-up, otherwise we setup direct mapping in top-down.
	 */
	if (memblock_bottom_up()) {
		unsigned long kernel_end = __pa_symbol(_end);

		/*
		 * We need two separate calls here. This is because we want to
		 * allocate page tables above the kernel. So we first map
		 * [kernel_end, end) to make memory above the kernel be mapped
		 * as soon as possible. And then use page tables allocated above
		 * the kernel to map [ISA_END_ADDRESS, kernel_end).
		 */
		memory_map_bottom_up(kernel_end, end);
		memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
	} else {
		memory_map_top_down(ISA_END_ADDRESS, end);
	}

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		/* can we preserve max_low_pfn? */
		max_low_pfn = max_pfn;
	}
#else
	early_ioremap_page_table_range_init();
#endif

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();

	x86_init.hyper.init_mem_mapping();

	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}

/*
 * Initialize an mm_struct to be used during poking and a pointer to be used
 * during patching.
 */
void __init poking_init(void)
{
	spinlock_t *ptl;
	pte_t *ptep;

	poking_mm = mm_alloc();
	BUG_ON(!poking_mm);

	/* Xen PV guests need the PGD to be pinned. */
	paravirt_enter_mmap(poking_mm);

	/*
	 * Randomize the poking address, but make sure that the following page
	 * will be mapped at the same PMD. We need 2 pages, so find space for 3,
	 * and adjust the address if the PMD ends after the first one.
	 */
	poking_addr = TASK_UNMAPPED_BASE;
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		poking_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) %
			(TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE);

	if (((poking_addr + PAGE_SIZE) & ~PMD_MASK) == 0)
		poking_addr += PAGE_SIZE;

	/*
	 * We need to trigger the allocation of the page-tables that will be
	 * needed for poking now. Later, poking may be performed in an atomic
	 * section, which might cause allocation to fail.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
	BUG_ON(!ptep);
	pte_unmap_unlock(ptep, ptl);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area traditionally contains BIOS code and data regions used by X, dosemu,
 * and similar apps. Since they map the entire memory range, the whole range
 * must be allowed (for mapping), but any areas that would otherwise be
 * disallowed are flagged as being "zero filled" instead of rejected.
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
				IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
			!= REGION_DISJOINT) {
		/*
		 * For disallowed memory regions in the low 1MB range,
		 * request that the page be shown as all zeros.
		 */
		if (pagenr < 256)
			return 2;

		return 0;
	}

	/*
	 * This must follow the RAM test, since System RAM is considered a
	 * restricted resource under CONFIG_STRICT_DEVMEM.
	 */
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
		/* Low 1MB bypasses iomem restrictions. */
		if (pagenr < 256)
			return 1;

		return 0;
	}

	return 1;
}

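/*
 * Free (or, with debug_pagealloc, just unmap) a page-aligned region of init
 * memory.
 */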
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned   = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end   = end_aligned;
	}

	if (begin >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
	if (debug_pagealloc_enabled()) {
		pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
			begin, end - 1);
		/*
		 * Inform kmemleak about the hole in the memory since the
		 * corresponding pages will be unmapped.
		 */
		kmemleak_free_part((void *)begin, end - begin);
		set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
	} else {
		/*
		 * We just marked the kernel text read only above; now that
		 * we are going to free part of that, we need to make it
		 * writeable and non-executable first.
		 */
		set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
		set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

		free_reserved_area((void *)begin, (void *)end,
				   POISON_FREE_INITMEM, what);
	}
}

/*
 * begin/end can be in the direct map or the "high kernel mapping"
 * used for the kernel image only. free_init_pages() will do the
 * right thing for either kind of address.
 */
void free_kernel_image_pages(const char *what, void *begin, void *end)
{
	unsigned long begin_ul = (unsigned long)begin;
	unsigned long end_ul = (unsigned long)end;
	unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT;

	free_init_pages(what, begin_ul, end_ul);

	/*
	 * PTI maps some of the kernel into userspace. For performance,
	 * this includes some kernel areas that do not contain secrets.
	 * Those areas might be adjacent to the parts of the kernel image
	 * being freed, which may contain secrets. Remove the "high kernel
	 * image mapping" for these freed areas, ensuring they are not even
	 * potentially vulnerable to Meltdown regardless of the specific
	 * optimizations PTI is currently using.
	 *
	 * The "noalias" prevents unmapping the direct map alias which is
	 * needed to access the freed pages.
	 *
	 * This is only valid for 64bit kernels. 32bit has only one mapping
	 * which can't be treated in this way for obvious reasons.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && cpu_feature_enabled(X86_FEATURE_PTI))
		set_memory_np_noalias(begin_ul, len_pages);
}

void __ref free_initmem(void)
{
	e820__reallocate_tables();

	mem_encrypt_free_decrypted_mem();

	free_kernel_image_pages("unused kernel image (initmem)",
				&__init_begin, &__init_end);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	/*
	 * end may not be page aligned, and we cannot align it here: the
	 * decompressor could be confused by an aligned initrd_end.
	 * We already reserved the trailing partial page earlier, in:
	 * - i386_start_kernel()
	 * - x86_64_start_kernel()
	 * - relocate_initrd()
	 * So it is safe to PAGE_ALIGN() here to free that partial page.
	 */
	free_init_pages("initrd", start, PAGE_ALIGN(end));
}
#endif

/*
 * Calculate the precise size of the DMA zone (first 16 MB of RAM),
 * and pass it to the MM layer - to help it set zone watermarks more
 * accurately.
 *
 * Done on 64-bit systems only for the time being, although 32-bit systems
 * might benefit from this as well.
 */
void __init memblock_find_dma_reserve(void)
{
#ifdef CONFIG_X86_64
	u64 nr_pages = 0, nr_free_pages = 0;
	unsigned long start_pfn, end_pfn;
	phys_addr_t start_addr, end_addr;
	int i;
	u64 u;

	/*
	 * Iterate over all memory ranges (free and reserved ones alike),
	 * to calculate the total number of pages in the first 16 MB of RAM:
	 */
	nr_pages = 0;
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		start_pfn = min(start_pfn, MAX_DMA_PFN);
		end_pfn   = min(end_pfn,   MAX_DMA_PFN);

		nr_pages += end_pfn - start_pfn;
	}

	/*
	 * Iterate over free memory ranges to calculate the number of free
	 * pages in the DMA zone, while not counting potential partial
	 * pages at the beginning or the end of the range:
	 */
	nr_free_pages = 0;
	for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) {
		start_pfn = min_t(unsigned long, PFN_UP(start_addr), MAX_DMA_PFN);
		end_pfn   = min_t(unsigned long, PFN_DOWN(end_addr), MAX_DMA_PFN);

		if (start_pfn < end_pfn)
			nr_free_pages += end_pfn - start_pfn;
	}

	set_dma_reserve(nr_pages - nr_free_pages);
#endif
}

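/*
 * Set the zone boundaries (DMA, DMA32, NORMAL, HIGHMEM) from the pfn limits
 * established during memory setup and hand them to the core MM.
 */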
void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]		= min(MAX_DMA_PFN, max_low_pfn);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32]	= min(MAX_DMA32_PFN, max_low_pfn);
#endif
	max_zone_pfns[ZONE_NORMAL]	= max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM]	= max_pfn;
#endif

	free_area_init(max_zone_pfns);
}

__visible DEFINE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate) = {
	.loaded_mm = &init_mm,
	.next_asid = 1,
	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
};

#ifdef CONFIG_ADDRESS_MASKING
DEFINE_PER_CPU(u64, tlbstate_untag_mask);
EXPORT_PER_CPU_SYMBOL(tlbstate_untag_mask);
#endif

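/*
 * Install the pte encoding for one PAT entry in both translation tables;
 * used by the PAT setup code once the final PAT layout is known.
 */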
void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
{
	/* entry 0 MUST be WB (hardwired to speed up translations) */
	BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB);

	__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
	__pte2cachemode_tbl[entry] = cache;
}

#ifdef CONFIG_SWAP
unsigned long arch_max_swapfile_size(void)
{
	unsigned long pages;

	pages = generic_max_swapfile_size();

	if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
		/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
		unsigned long long l1tf_limit = l1tf_pfn_limit();
		/*
		 * We encode swap offsets also with 3 bits below those for pfn
		 * which makes the usable limit higher.
		 */
#if CONFIG_PGTABLE_LEVELS > 2
		l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
#endif
		pages = min_t(unsigned long long, l1tf_limit, pages);
	}
	return pages;
}
#endif