/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include "mm.h"

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

/*
 * Copy the kernel's vmalloc-area page directory entries into this mm,
 * retrying until the sequence counter is stable.  This brings an mm
 * holding a stale copy of the kernel mappings back up to date.
 */
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}
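
/*
 * Illustrative sketch (not part of the original file): a minimal caller
 * of ioremap_page(), mapping one device page at a fixed virtual address.
 * Both addresses below are hypothetical and would come from the platform.
 */
#if 0
static int __init example_map_one_page(void)
{
	unsigned long virt = 0xf8000000UL;	/* hypothetical virtual address */
	unsigned long phys = 0x10000000UL;	/* hypothetical device base */

	/* MT_DEVICE gives the usual shared-device memory attributes */
	return ioremap_page(virt, phys, get_mem_type(MT_DEVICE));
}
#endif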
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a 4K guard page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	/*
	 * Each 2MB Linux PMD holds two 1MB hardware section entries,
	 * hence the paired writes and the "pmd += 2" stride.
	 */
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
			PMD_SECT_SUPER;
		/* physical address bits [35:32] live in entry bits [23:20] */
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		/*
		 * A 16MB supersection is replicated across 16 consecutive
		 * section entries (8 iterations of 2).
		 */
		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif
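
/*
 * Illustrative sketch (not part of the original file): the static
 * mappings that __arm_ioremap_pfn_caller() below tries to reuse are
 * registered at boot through iotable_init(), typically from a machine's
 * .map_io hook.  All names and addresses here are hypothetical.
 */
#if 0
static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual	= 0xf8000000UL,			/* hypothetical */
		.pfn		= __phys_to_pfn(0x10000000UL),	/* hypothetical */
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	},
};

static void __init example_map_io(void)
{
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}
#endif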
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area; area = area->next) {
		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
			break;
		if (!(area->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		if (__phys_to_pfn(area->phys_addr) > pfn ||
		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
			continue;
		/* we can drop the lock here as we know *area is static */
		read_unlock(&vmlist_lock);
		addr = (unsigned long)area->addr;
		addr += __pfn_to_phys(pfn) - area->phys_addr;
		return (void __iomem *) (offset + addr);
	}
	read_unlock(&vmlist_lock);

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	return arch_ioremap_caller(phys_addr, size, mtype,
		__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);
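
/*
 * Illustrative sketch (not part of the original file): typical driver
 * usage of the ioremap()/iounmap() wrappers built on the functions
 * above.  Only readl()/writel() and friends may touch the returned
 * cookie.  The physical base and register offsets are hypothetical.
 */
#if 0
static void example_access_device(void)
{
	void __iomem *regs;

	regs = ioremap(0x10000000UL, SZ_4K);	/* hypothetical device base */
	if (!regs)
		return;

	writel(0x1, regs + 0x04);		/* hypothetical control register */
	(void)readl(regs + 0x08);		/* hypothetical status register */

	iounmap(regs);
}
#endif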
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory.  This is needed, for example, when
 * reprogramming source clocks that would affect normal memory.  Please
 * see CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY;
	else
		mtype = MT_MEMORY_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct *vm;

	read_lock(&vmlist_lock);
	for (vm = vmlist; vm; vm = vm->next) {
		if (vm->addr > addr)
			break;
		if (!(vm->flags & VM_IOREMAP))
			continue;
		/* If this is a static mapping we must leave it alone */
		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
			read_unlock(&vmlist_lock);
			return;
		}
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
		/*
		 * If this is a section-based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if ((vm->addr == addr) &&
		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
			unmap_area_sections((unsigned long)vm->addr, vm->size);
			break;
		}
#endif
	}
	read_unlock(&vmlist_lock);

	vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void __arm_iounmap(volatile void __iomem *io_addr)
{
	arch_iounmap(io_addr);
}
EXPORT_SYMBOL(__arm_iounmap);
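
/*
 * Illustrative sketch (not part of the original file): mapping on-chip
 * SRAM with __arm_ioremap_exec() so that relocated code (e.g. a clock
 * reprogramming routine) can run from it.  Base and size are
 * hypothetical.
 */
#if 0
static void __iomem *example_map_sram(void)
{
	/* cached, executable mapping of hypothetical SRAM */
	return __arm_ioremap_exec(0x40200000UL, SZ_32K, true);
}
#endif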