xref: /openbmc/linux/arch/x86/mm/highmem_32.c (revision cff4fa84)
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
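
/*
 * Illustrative sketch, not part of this file: the usual way a caller
 * pairs kmap() with kunmap().  kmap() may sleep on a highmem page and
 * kunmap() must not run in interrupt context, so both belong in
 * process context.  copy_from_page() and its arguments are
 * hypothetical.
 *
 *	static void copy_from_page(struct page *page, void *buf, size_t len)
 *	{
 *		void *vaddr = kmap(page);
 *
 *		memcpy(buf, vaddr, len);
 *		kunmap(page);
 *	}
 */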

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);
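
/*
 * Worked example, not part of this file: each CPU owns a window of
 * KM_TYPE_NR consecutive fixmap slots, and the per-CPU stack index
 * from kmap_atomic_idx_push() selects a slot inside that window.
 * Assuming, say, KM_TYPE_NR == 20, the second nested atomic kmap
 * (type == 1) on CPU 2 would use:
 *
 *	idx   = 1 + 20 * 2 = 41
 *	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + 41)
 *	pte   = kmap_pte - 41
 *
 * Fixmap indices count down from FIXADDR_TOP while the pte array runs
 * with ascending virtual addresses, hence the kmap_pte - idx
 * arithmetic.  No two CPUs, and no two nesting levels on one CPU,
 * ever share a slot, which is why no global lock is needed.
 */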

void *__kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(__kmap_atomic);
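
/*
 * Illustrative sketch, not part of this file: a typical atomic kmap
 * pair.  At this revision the kmap_atomic() entry point is expected to
 * resolve to __kmap_atomic() above; nothing between the map and the
 * unmap may sleep or fault.  zero_highpage_example() is hypothetical.
 *
 *	static void zero_highpage_example(struct page *page)
 *	{
 *		void *vaddr = kmap_atomic(page);
 *
 *		memset(vaddr, 0, PAGE_SIZE);
 *		kunmap_atomic(vaddr);
 *	}
 */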

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
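
/*
 * Illustrative sketch, not part of this file: reading from a page
 * frame that has no struct page, e.g. memory outside the kernel's own
 * mem_map such as another kernel's RAM in a crash-dump path.
 * read_raw_pfn() and its arguments are hypothetical.
 *
 *	static void read_raw_pfn(unsigned long pfn, void *buf,
 *				 unsigned long offset, size_t len)
 *	{
 *		void *vaddr = kmap_atomic_pfn(pfn);
 *
 *		memcpy(buf, vaddr + offset, len);
 *		kunmap_atomic(vaddr);
 *	}
 */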

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte-idx, vaddr);
		kmap_atomic_idx_pop();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
EXPORT_SYMBOL(kmap_atomic_to_page);
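
/*
 * Illustrative sketch, not part of this file: kmap_atomic_to_page()
 * round-trips an atomic mapping back to its struct page by reading the
 * fixmap pte, falling back to virt_to_page() for lowmem addresses.
 *
 *	void *vaddr = kmap_atomic(page);
 *
 *	BUG_ON(kmap_atomic_to_page(vaddr) != page);
 *	kunmap_atomic(vaddr);
 */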

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
				zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
				 zone_end_pfn);
	}
	totalram_pages += totalhigh_pages;
}