/* arch/mips/mm/highmem.c (revision fd589a8f) */
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

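/* Cached kernel pte for the first kmap fixmap slot, set up by kmap_init(). */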
static pte_t *kmap_pte;

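/* PFN bounds of the highmem region, established during platform memory setup. */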
unsigned long highstart_pfn, highend_pfn;

void *__kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(__kmap);

void __kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(__kunmap);
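
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * pairs kmap() with kunmap() around a temporary access to a page that
 * may live in highmem.  The helper name below is hypothetical.
 */
#if 0
static void example_clear_page(struct page *page)
{
	void *vaddr = kmap(page);	/* may sleep, so not for atomic context */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);			/* drop the temporary mapping */
}
#endif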

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because no global lock is needed and because kmap must perform a
 * global TLB invalidation when the kmap pool wraps around.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so
 * atomic kmaps are appropriate for short, tight code paths only.  A
 * usage sketch follows __kunmap_atomic() below.
 */

void *__kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
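	/*
	 * Each CPU owns a private window of KM_TYPE_NR fixmap slots, so
	 * e.g. slot KM_USER0 on CPU 1 resolves to index KM_TYPE_NR + KM_USER0;
	 * concurrent atomic kmaps on different CPUs never collide.
	 */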
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);

void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();

	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.
	 */
	pte_clear(&init_mm, vaddr, kmap_pte - idx);
	local_flush_tlb_one(vaddr);
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
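
/*
 * Illustrative sketch, not part of the original file: kmap_atomic() and
 * kunmap_atomic() must be paired in strict LIFO order, with no sleeping
 * while the mappings are held.  The helper name is hypothetical.
 */
#if 0
static void example_copy_highpage(struct page *dst, struct page *src)
{
	void *vdst = kmap_atomic(dst, KM_USER0);
	void *vsrc = kmap_atomic(src, KM_USER1);

	memcpy(vdst, vsrc, PAGE_SIZE);	/* no sleeping while mapped */
	kunmap_atomic(vsrc, KM_USER1);	/* release in reverse order */
	kunmap_atomic(vdst, KM_USER0);
}
#endif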

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}
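
/*
 * Illustrative sketch, not part of the original file: kmap_atomic_pfn()
 * is useful for memory with no struct page behind it, e.g. a raw frame
 * number handed over by a device.  The helper name is hypothetical.
 */
#if 0
static u32 example_peek_pfn(unsigned long pfn)
{
	u32 *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
	u32 val = *vaddr;		/* read through the temporary window */

	kunmap_atomic(vaddr, KM_USER0);
	return val;
}
#endif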

struct page *__kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
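
/*
 * Illustrative sketch, not part of the original file: recovering the
 * struct page behind an atomic kmap address, e.g. to update its flags.
 * The helper name is hypothetical.
 */
#if 0
static void example_dirty_mapped_page(void *kvaddr)
{
	struct page *page = kmap_atomic_to_page(kvaddr);

	set_page_dirty(page);	/* mark the backing page dirty */
}
#endif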

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}
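
/*
 * For reference (an assumption, not part of the original file):
 * kmap_get_fixmap_pte() is conventionally a walk of the kernel page
 * tables down to the pte backing the given fixmap virtual address,
 * roughly equivalent to the sketch below.
 */
#if 0
static pte_t *example_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(
		pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), vaddr),
		vaddr);
}
#endif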