// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
				      pgprot_t prot, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * Page align the mapping address and size, taking account of any
	 * offset.
	 */
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	/*
	 * Don't allow wraparound, zero size or outside PHYS_MASK.
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped.
	 */
	if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr))))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = phys_addr;

	err = ioremap_page_range(addr, addr + size, phys_addr, prot);
	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	return (void __iomem *)(offset + addr);
}

void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
{
	return __ioremap_caller(phys_addr, size, prot,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__ioremap);

void iounmap(volatile void __iomem *io_addr)
{
	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;

	/*
	 * We could get an address outside vmalloc range in case
	 * of ioremap_cache() reusing a RAM mapping.
	 */
	if (is_vmalloc_addr((void *)addr))
		vunmap((void *)addr);
}
EXPORT_SYMBOL(iounmap);

void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
{
	/* For normal memory we already have a cacheable mapping. */
	if (pfn_is_map_memory(__phys_to_pfn(phys_addr)))
		return (void __iomem *)__phys_to_virt(phys_addr);

	return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}

bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
				 unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	return pfn_is_map_memory(pfn);
}
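
/*
 * Illustrative sketch (editorial addition, not part of this file): a minimal
 * example of how a driver might consume the mappings created above. The
 * device base address, window size and register offset are hypothetical.
 * The sketch calls __ioremap() directly with Device-nGnRE attributes and is
 * guarded with #if 0 so it does not affect the build.
 */
#if 0
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/sizes.h>

#define EXAMPLE_DEV_BASE	0x40000000UL	/* hypothetical MMIO base */
#define EXAMPLE_CTRL_REG	0x10		/* hypothetical register offset */

static int example_probe(void)
{
	void __iomem *regs;

	/* Map the device window; RAM pages are rejected by __ioremap_caller(). */
	regs = __ioremap(EXAMPLE_DEV_BASE, SZ_4K, __pgprot(PROT_DEVICE_nGnRE));
	if (!regs)
		return -ENOMEM;

	/* Always go through the MMIO accessors, never a plain dereference. */
	writel(0x1, regs + EXAMPLE_CTRL_REG);

	iounmap(regs);
	return 0;
}
#endif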