/*
 * Provide common bits of early_ioremap() support for architectures needing
 * temporary mappings during boot before ioremap() is available.
 *
 * This is mostly a direct copy of the x86 early_ioremap implementation.
 *
 * (C) Copyright 1995 1996, 2014 Linus Torvalds
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/fixmap.h>
#include <asm/early_ioremap.h>

#ifdef CONFIG_MMU
static int early_ioremap_debug __initdata;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static int after_paging_init __initdata;

pgprot_t __init __weak early_memremap_pgprot_adjust(resource_size_t phys_addr,
						    unsigned long size,
						    pgprot_t prot)
{
	return prot;
}

void __init __weak early_ioremap_shutdown(void)
{
}

void __init early_ioremap_reset(void)
{
	early_ioremap_shutdown();
	after_paging_init = 1;
}

/*
 * Generally, ioremap() is available after paging_init() has been called.
 * Architectures wanting to allow early_ioremap after paging_init() can
 * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
 */
#ifndef __late_set_fixmap
static inline void __init __late_set_fixmap(enum fixed_addresses idx,
					    phys_addr_t phys, pgprot_t prot)
{
	BUG();
}
#endif

#ifndef __late_clear_fixmap
static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
{
	BUG();
}
#endif
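
/*
 * Example (illustrative sketch only, not part of this file): an architecture
 * that wants early_ioremap()/early_memremap() to keep working after
 * paging_init() could provide the two hooks above from its asm/fixmap.h,
 * roughly along these lines.  This assumes the architecture implements the
 * __set_fixmap() helper that the generic fixmap code expects.
 */
#if 0
#define __late_set_fixmap(idx, phys, prot)	__set_fixmap(idx, phys, prot)
#define __late_clear_fixmap(idx)		clear_fixmap(idx)
#endif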

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_setup(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (WARN_ON(prev_map[i]))
			break;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
}

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (WARN(count, KERN_WARNING
		 "Debug warning: early ioremap leak of %d areas detected.\n"
		 "please boot with early_ioremap_debug and report the dmesg.\n",
		 count))
		return 1;
	return 0;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%08llx, %08lx) not found slot\n",
		 __func__, (u64)phys_addr, size))
		return NULL;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (WARN_ON(!size || last_addr < phys_addr))
		return NULL;

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = offset_in_page(phys_addr);
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (WARN_ON(nrpages > NR_FIX_BTMAPS))
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_set_fixmap(idx, phys_addr, prot);
		else
			__early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	WARN(early_ioremap_debug, "%s(%08llx, %08lx) [%d] => %08lx + %08lx\n",
	     __func__, (u64)phys_addr, size, slot, offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "early_iounmap(%p, %08lx) not found slot\n",
		 addr, size))
		return;

	if (WARN(prev_size[slot] != size,
		 "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
		 addr, size, slot, prev_size[slot]))
		return;

	WARN(early_ioremap_debug, "early_iounmap(%p, %08lx) [%d]\n",
	     addr, size, slot);

	virt_addr = (unsigned long)addr;
	if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
		return;

	offset = offset_in_page(virt_addr);
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_clear_fixmap(idx);
		else
			__early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
						     FIXMAP_PAGE_NORMAL);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}
#ifdef FIXMAP_PAGE_RO
void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
						     FIXMAP_PAGE_RO);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}
#endif

#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
void __init *
early_memremap_prot(resource_size_t phys_addr, unsigned long size,
		    unsigned long prot_val)
{
	return (__force void *)__early_ioremap(phys_addr, size,
					       __pgprot(prot_val));
}
#endif

#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)

void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
{
	unsigned long slop, clen;
	char *p;

	while (size) {
		slop = offset_in_page(src);
		clen = size;
		if (clen > MAX_MAP_CHUNK - slop)
			clen = MAX_MAP_CHUNK - slop;
		p = early_memremap(src & PAGE_MASK, clen + slop);
		memcpy(dest, p + slop, clen);
		early_memunmap(p, clen + slop);
		dest += clen;
		src += clen;
		size -= clen;
	}
}
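
/*
 * Example (illustrative sketch only, not part of this file): typical
 * boot-time use of early_memremap()/early_memunmap() to read a
 * firmware-provided table before ioremap()/memremap() are available.
 * The function and argument names are invented for the example; only the
 * early_memremap()/early_memunmap() calls reflect this file's API.
 */
#if 0
static int __init example_parse_fw_table(phys_addr_t table_pa, unsigned long len)
{
	void *table;

	/*
	 * The mapping is temporary and consumes one of the FIX_BTMAPS
	 * slots, so it must be torn down with early_memunmap() before
	 * the slot is needed again (and before late boot, or the leak
	 * check above will warn).
	 */
	table = early_memremap(table_pa, len);
	if (!table)
		return -ENOMEM;

	/* ... read the table contents through 'table' ... */

	early_memunmap(table, len);
	return 0;
}
#endif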

#else /* CONFIG_MMU */

void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void __iomem *)phys_addr;
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}
void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
}

#endif /* CONFIG_MMU */

void __init early_memunmap(void *addr, unsigned long size)
{
	early_iounmap((__force void __iomem *)addr, size);
}
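
/*
 * Example (illustrative sketch only, not part of this file): early_ioremap()
 * is the I/O-space counterpart, e.g. for prodding a device's MMIO registers
 * before ioremap() works.  The function name, register offset and mapping
 * size below are invented for the example.
 */
#if 0
static void __init example_early_mmio_write(phys_addr_t dev_phys, u32 val)
{
	void __iomem *regs;

	regs = early_ioremap(dev_phys, 0x1000);
	if (!regs)
		return;

	writel(val, regs + 0x10);	/* hypothetical register offset */

	/* Must be unmapped with the same size that was mapped. */
	early_iounmap(regs, 0x1000);
}
#endif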