#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

/*
 * Map @page into the kernel's virtual address space.  Lowmem pages are
 * permanently mapped and can be returned directly; highmem pages take a
 * slot from the global kmap pool, which may sleep while waiting for a
 * free slot.
 */
void *__kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}

/*
 * Release a mapping taken with __kmap().  Must not be called from
 * interrupt context, since kunmap_high() may need to take the kmap lock.
 */
void __kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

void *__kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/* pick this CPU's fixmap slot for the given kmap type */
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	if (!pte_none(*(kmap_pte-idx)))
		BUG();
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void*) vaddr;
}

void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
		BUG();

	/*
	 * force other mappings to Oops if they'll try to access
	 * this pte without first remapping it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	local_flush_tlb_one(vaddr);
#endif

	pagefault_enable();
}

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
	flush_tlb_one(vaddr);

	return (void*) vaddr;
}

/*
 * Translate an address returned by __kmap_atomic() back to its struct
 * page.  Plain lowmem addresses are handled via virt_to_page(); fixmap
 * addresses are looked up through the kmap pte array.
 */
struct page *__kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

EXPORT_SYMBOL(__kmap);
EXPORT_SYMBOL(__kunmap);
EXPORT_SYMBOL(__kmap_atomic);
EXPORT_SYMBOL(__kunmap_atomic);
EXPORT_SYMBOL(__kmap_atomic_to_page);
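
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * use the atomic kmap interface above to zero a highmem page from a
 * non-sleeping context.  The helper name clear_highmem_page_example() is
 * hypothetical; KM_USER0 is one of the per-CPU slots from enum km_type.
 * Real callers would normally go through the kmap_atomic()/kunmap_atomic()
 * wrappers in the architecture headers rather than calling the
 * double-underscore functions directly.
 */
#if 0	/* example only -- not compiled */
static void clear_highmem_page_example(struct page *page)
{
	void *vaddr;

	/* grabs a per-CPU fixmap slot and disables pagefaults */
	vaddr = __kmap_atomic(page, KM_USER0);
	memset(vaddr, 0, PAGE_SIZE);
	/* must be released on the same CPU, with no sleep in between */
	__kunmap_atomic(vaddr, KM_USER0);
}
#endif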