/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <asm/mach/map.h>
#include "mm.h"

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in the vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000
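/*
 * Typical use, as a hedged sketch only (the physical address and the
 * register offsets are made up for illustration): a driver maps its
 * device registers once, talks to them through the MMIO accessors on
 * the returned cookie, and unmaps when done:
 *
 *	void __iomem *regs = ioremap(0xf1000000, SZ_4K);
 *	if (regs) {
 *		u32 id = readl(regs);		read a (hypothetical) ID reg
 *		writel(1, regs + 0x4);		poke a (hypothetical) ctrl reg
 *		iounmap(regs);
 *	}
 *
 * Dereferencing the cookie directly instead of going through readl,
 * writel and friends is not portable and defeats the __iomem annotation.
 */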
int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}
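/*
 * The loop above is a plain sequence-counter retry ("kvm" here is short
 * for kernel virtual mappings, not the KVM hypervisor).  The writer side
 * (unmap_area_sections() below) first changes the mapping and then does
 * init_mm.context.kvm_seq++; the reader copies the kernel's vmalloc pgd
 * entries and only accepts the copy if the sequence number did not
 * change underneath it.  Worked through one race: if the writer bumps
 * kvm_seq after the reader sampled 'seq' but before the memcpy()
 * finished, the while() condition fails and the reader copies again, so
 * a task can never resume with a stale snapshot of the kernel page
 * tables.
 */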
#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (e.g.) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a 4K guard page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}
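/*
 * A worked example of the arithmetic in remap_area_sections(), for
 * illustration (the physical address is made up): on ARM, a Linux pgd
 * entry spans PGDIR_SIZE = 2MB and holds a pair of 1MB hardware section
 * entries, which is why pmd[0] and pmd[1] are written together.  With
 * PAGE_SHIFT = 12, SZ_1M >> PAGE_SHIFT = 256 pages, so mapping 4MB at
 * physical 0x40000000 (pfn 0x40000) writes:
 *
 *	pmd[0] = 0x40000000 | prot_sect;  pfn += 256;
 *	pmd[1] = 0x40100000 | prot_sect;  pfn += 256;
 *	(next pgd)
 *	pmd[0] = 0x40200000 | prot_sect;  ...
 *
 * i.e. two iterations of the loop cover the four sections.
 */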
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif
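/*
 * Supersection layout, worked through for illustration (the example
 * address is made up): a supersection maps 16MB and must be replicated
 * in 16 consecutive hardware entries, i.e. 8 Linux pgd slots of two
 * entries each - hence the i < 8 loop writing pmd[0] and pmd[1] above.
 * Supersections also extend addressing to 36 bits: with PAGE_SHIFT = 12,
 * (pfn >> 20) & 0xf extracts physical address bits [35:32], which the
 * descriptor carries in bits [23:20].  For example, physical address
 * 0x2_4000_0000 has pfn 0x240000, so (0x240000 >> 20) & 0xf = 0x2 lands
 * in bits [23:20], while __pfn_to_phys() supplies the low 32 bits.
 */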
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	    !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size.
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, caller);
}
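/*
 * Worked example of the offset handling above, for illustration (the
 * address is made up): ioremapping 8 bytes at physical 0xf1000004 gives
 * offset = 0x4, pfn = 0xf1000, and size = PAGE_ALIGN(0x4 + 8) = 0x1000,
 * so a whole page is mapped and the returned cookie points 4 bytes into
 * it - the caller never sees the page-alignment fixup.
 */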
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	return __arm_ioremap_caller(phys_addr, size, mtype,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here because we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p); p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);