// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/memblock.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/* Base pte of the FIX_KMAP_BEGIN fixmap area, cached at boot by kmap_init(). */
static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

/*
 * Map a page into kernel virtual space. May sleep, so it must not be
 * called from atomic context; lowmem pages already have a permanent
 * mapping and are returned directly.
 */
void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

/*
 * Map a page through one of this CPU's atomic-kmap fixmap slots. The
 * mapping is per-CPU, so preemption and pagefaults stay disabled until
 * the matching __kunmap_atomic().
 */
void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
	flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr < FIXADDR_START)
		goto out;

#ifdef CONFIG_DEBUG_HIGHMEM
	{
		int idx = KM_TYPE_NR * smp_processor_id() + kmap_atomic_idx();

		/* The address handed back must match the slot we mapped. */
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		pte_clear(&init_mm, vaddr, kmap_pte - idx);
		flush_tlb_one(vaddr);
	}
#endif
	kmap_atomic_idx_pop();
out:
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();	/* smp_processor_id() below needs a stable CPU */
	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *) vaddr;
}

/* Translate an atomic-kmap virtual address back to its struct page. */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

/*
 * Allocate the page tables backing the permanent-kmap (PKMAP) window
 * and remember the base pte in pkmap_page_table. fixrange_init() is
 * the arch helper that populates the intermediate page-table levels
 * for a fixed-address range.
 */
static void __init kmap_pages_init(void)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	fixrange_init(vaddr, vaddr + PAGE_SIZE * LAST_PKMAP, swapper_pg_dir);

	pgd = swapper_pg_dir + __pgd_offset(vaddr);
	pud = (pud_t *)pgd;
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

void __init kmap_init(void)
{
	unsigned long vaddr;

	kmap_pages_init();

	/* Cache the pte backing the first atomic-kmap fixmap slot. */
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN);

	kmap_pte = pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
}
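
/*
 * Caller-side sketch (illustrative only, not part of the upstream file):
 * the usual pattern for briefly touching a possibly-highmem page through
 * the atomic kmap slots set up above. The helper name and its arguments
 * are hypothetical; no sleeping is allowed between the map and the unmap.
 */
#if 0
static void copy_from_page_example(struct page *page, void *dst, size_t len)
{
	void *src = kmap_atomic(page);	/* grab a per-CPU fixmap slot */

	memcpy(dst, src, len);		/* atomic context: no sleeping here */
	__kunmap_atomic(src);		/* release the slot, re-enable faults */
}
#endif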