#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/* pick a per-CPU fixmap slot for this nesting level */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type __maybe_unused;

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		int idx = type + KM_TYPE_NR * smp_processor_id();

		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * Force other mappings to Oops if they try to access
		 * this pte without first remapping it.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		local_flush_tlb_one(vaddr);
	}
#endif
	kmap_atomic_idx_pop();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	/* lowmem addresses have a direct virt-to-page mapping */
	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}
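
/*
 * Illustrative only, not part of the original file: a minimal sketch of
 * how callers typically pair the two mapping flavours defined above.
 * The helper names zero_highpage_sleepable and zero_highpage_atomic are
 * hypothetical; kmap/kunmap, kmap_atomic/__kunmap_atomic and memset are
 * the real interfaces used. Guarded with #if 0 so the build is unchanged.
 */
#if 0
#include <linux/highmem.h>
#include <linux/string.h>

/* May sleep: suitable only for ordinary process context. */
static void zero_highpage_sleepable(struct page *page)
{
	void *addr = kmap(page);	/* may block on the kmap pool */

	memset(addr, 0, PAGE_SIZE);
	kunmap(page);			/* note: takes the page, not the address */
}

/* Must not sleep between map and unmap: for short, tight code paths. */
static void zero_highpage_atomic(struct page *page)
{
	void *addr = kmap_atomic(page);	/* disables pagefaults */

	memset(addr, 0, PAGE_SIZE);
	__kunmap_atomic(addr);		/* note: takes the kernel virtual address */
}
#endif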