xref: /openbmc/linux/arch/mips/mm/highmem.c (revision 1fa6ac37)
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

/* pte of the first kmap fixmap slot, cached at boot by kmap_init() */
static pte_t *kmap_pte;

/* pfn bounds of the highmem range, established during memory setup */
unsigned long highstart_pfn, highend_pfn;

/*
 * Map a page into kernel virtual space.  May sleep while a highmem
 * slot frees up, so it is usable from process context only; lowmem
 * pages are permanently mapped and short-circuit via page_address().
 */
void *__kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);	/* kill any stale TLB entry */

	return addr;
}
EXPORT_SYMBOL(__kmap);

/*
 * Undo a __kmap().  Must not be called from interrupt context; for
 * lowmem pages there is nothing to undo.
 */
void __kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(__kunmap);

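/*
 * Illustrative sketch, not part of the original file: a typical
 * sleepable-context caller, going through the kmap()/kunmap() wrappers
 * that <asm/highmem.h> maps onto __kmap()/__kunmap().  The helper name
 * is hypothetical.
 */
static inline void __maybe_unused example_zero_page(struct page *page)
{
	void *vaddr = kmap(page);	/* may sleep until a slot frees up */

	clear_page(vaddr);		/* touch the page via its mapping */
	kunmap(page);			/* drop the pool reference again */
}
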
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap,
 * both because no global lock is needed and because the kmap code must
 * perform a global TLB invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so
 * atomic kmaps are appropriate for short, tight code paths only.
 */

void *__kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));	/* slot must still be free */
#endif
	/* fixmap addresses grow downwards, hence kmap_pte - idx */
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one(vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);

/*
 * Undo __kmap_atomic() and re-enable pagefaults.  Without
 * CONFIG_DEBUG_HIGHMEM the pte is left in place and simply recycled
 * by the slot's next user.
 */
void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();

	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.
	 */
	pte_clear(&init_mm, vaddr, kmap_pte - idx);
	local_flush_tlb_one(vaddr);
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

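/*
 * Illustrative sketch, not part of the original file: reading one byte
 * of a highmem page from a context that must not sleep.  The helper
 * name is hypothetical; kmap_atomic()/kunmap_atomic() are the
 * <asm/highmem.h> wrappers around the two functions above, and
 * KM_USER0 is one of the per-CPU km_type slots.
 */
static inline unsigned char __maybe_unused
example_peek_byte(struct page *page, unsigned int offset)
{
	unsigned char *vaddr = kmap_atomic(page, KM_USER0);
	unsigned char c = vaddr[offset];	/* no sleeping in between */

	kunmap_atomic(vaddr, KM_USER0);
	return c;
}
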
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}

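/*
 * Illustrative sketch, not part of the original file: peeking at a
 * physical frame identified only by its pfn, e.g. memory without a
 * mem_map entry.  The helper name is hypothetical.
 */
static inline u32 __maybe_unused example_peek_pfn_word(unsigned long pfn)
{
	u32 *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
	u32 val = *vaddr;

	kunmap_atomic(vaddr, KM_USER0);	/* pairs with the mapping above */
	return val;
}
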
/*
 * Convert an address obtained from __kmap_atomic() back to the page
 * it maps; lowmem addresses translate directly via virt_to_page().
 */
struct page *__kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

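/*
 * Illustrative sketch, not part of the original file: with only the
 * mapped address at hand, the owning struct page can be recovered,
 * e.g. to test PageHighMem() before further treatment.  The helper
 * name is hypothetical.
 */
static inline int __maybe_unused example_vaddr_is_highmem(void *vaddr)
{
	return PageHighMem(__kmap_atomic_to_page(vaddr));
}
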
/*
 * Called once during early boot so that the atomic kmap code above can
 * index straight off the cached pte.
 */
void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}