// SPDX-License-Identifier: GPL-2.0-only
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/memblock.h>

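/*
 * Map @page into a per-CPU fixmap slot with protection @prot.  The slot
 * index comes from the per-CPU kmap_atomic type stack plus a per-CPU
 * offset, so nested atomic kmaps on one CPU land in distinct slots.
 */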
void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte - idx)));	/* slot must be unused */
	set_pte(kmap_pte - idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_high_prot);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
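
/*
 * Illustrative caller (a sketch, not part of this file): copying out of
 * a pfn that has no struct page, e.g. a reserved firmware range.  The
 * names src_pfn, buf and len are hypothetical.
 *
 *	void *vaddr = kmap_atomic_pfn(src_pfn);
 *	memcpy(buf, vaddr, len);
 *	kunmap_atomic(vaddr);
 */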

void kunmap_atomic_high(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

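	/*
	 * Fixmap virtual addresses grow downwards, so FIX_KMAP_END maps to
	 * the lowest kmap address and FIX_KMAP_BEGIN to the highest; this
	 * test checks whether kvaddr lies in the kmap_atomic window at all.
	 */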
	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte - idx, vaddr);
		kmap_atomic_idx_pop();
		arch_flush_lazy_mmu_mode();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif
}
EXPORT_SYMBOL(kunmap_atomic_high);

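/*
 * Called once at boot: walk every highmem zone and hand its spanned pfn
 * range to add_highpages_with_active_regions(), which releases the pages
 * that fall inside an active memory region to the buddy allocator.
 */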
void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	/*
	 * Explicitly reset zone->managed_pages because set_highmem_pages_init()
	 * is invoked before memblock_free_all().
	 */
	reset_all_zones_managed_pages();
	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
		       zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
}