/*
 * Copyright © 2008 Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */

#include <asm/iomap.h>
#include <asm/pat.h>
#include <linux/module.h>
#include <linux/highmem.h>

static int is_io_mapping_possible(resource_size_t base, unsigned long size)
{
#if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT)
	/* There is no way to map greater than 1 << 32 address without PAE */
	if (base + size > 0x100000000ULL)
		return 0;
#endif
	return 1;
}

int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;
	int ret;

	if (!is_io_mapping_possible(base, size))
		return -EINVAL;

	ret = io_reserve_memtype(base, base + size, &pcm);
	if (ret)
		return ret;

	*prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm));
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_create_wc);

void iomap_free(resource_size_t base, unsigned long size)
{
	io_free_memtype(base, base + size);
}
EXPORT_SYMBOL_GPL(iomap_free);

void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

/*
 * Map 'pfn' using protections 'prot'
 */
void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	/*
	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
	 * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
	 * MTRR is UC or WC.  UC_MINUS gets the real intention of the
	 * user, which is "WC if the MTRR is WC, UC if you can't do that."
	 */
	if (!pat_enabled && pgprot_val(prot) ==
	    (__PAGE_KERNEL | cachemode2protval(_PAGE_CACHE_MODE_WC)))
		prot = __pgprot(__PAGE_KERNEL |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));

	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);

void
iounmap_atomic(void __iomem *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is a bad idea also, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte - idx, vaddr);
		kmap_atomic_idx_pop();
	}

	pagefault_enable();
}
EXPORT_SYMBOL_GPL(iounmap_atomic);
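
/*
 * Illustrative sketch (not part of the original file): how a driver-side
 * caller might combine the helpers above to write one register in a PCI BAR
 * through an atomic write-combined mapping.  The example_map_and_write()
 * helper and its base/size/offset parameters are hypothetical; in practice
 * callers normally reach these functions through the io_mapping wrappers in
 * <linux/io-mapping.h> rather than calling them directly.
 *
 *	static int example_map_and_write(resource_size_t base,
 *					 unsigned long size,
 *					 unsigned long offset, u32 val)
 *	{
 *		pgprot_t prot;
 *		void __iomem *vaddr;
 *		int ret;
 *
 *		// Reserve the range as WC (or UC- on non-PAT systems).
 *		ret = iomap_create_wc(base, size, &prot);
 *		if (ret)
 *			return ret;
 *
 *		// Map the page containing the target offset, do the MMIO
 *		// write, then drop the atomic mapping before sleeping.
 *		vaddr = iomap_atomic_prot_pfn(PFN_DOWN(base + offset), prot);
 *		writel(val, vaddr + (offset & ~PAGE_MASK));
 *		iounmap_atomic(vaddr);
 *
 *		iomap_free(base, size);
 *		return 0;
 *	}
 */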