/*
 *  linux/arch/arm/mm/nommu.c
 *
 *  ARM uCLinux supporting functions.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/kernel.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/mach/arch.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
#include <asm/procinfo.h>

#include "mm.h"

unsigned long vectors_base;

#ifdef CONFIG_ARM_MPU
struct mpu_rgn_info mpu_rgn_info;
#endif

#ifdef CONFIG_CPU_CP15
#ifdef CONFIG_CPU_HIGH_VECTOR
unsigned long setup_vectors_base(void)
{
        unsigned long reg = get_cr();

        /* Setting CR_V selects the high vectors at 0xffff0000 */
        set_cr(reg | CR_V);
        return 0xffff0000;
}
#else /* CONFIG_CPU_HIGH_VECTOR */
/* Write exception base address to VBAR */
static inline void set_vbar(unsigned long val)
{
        asm("mcr p15, 0, %0, c12, c0, 0" : : "r" (val) : "cc");
}

/*
 * ID_PFR1 bits[7:4] describe the Security Extensions; permitted values:
 * 0b0000 - not implemented, 0b0001/0b0010 - implemented
 */
static inline bool security_extensions_enabled(void)
{
        /* Check the CPUID identification scheme before reading ID_PFR1 */
        if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
                return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
        return false;
}

unsigned long setup_vectors_base(void)
{
        unsigned long base = 0, reg = get_cr();

        set_cr(reg & ~CR_V);
        if (security_extensions_enabled()) {
                if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM))
                        base = CONFIG_DRAM_BASE;
                set_vbar(base);
        } else if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM)) {
                if (CONFIG_DRAM_BASE != 0)
                        pr_err("Security extensions not enabled, vectors cannot be remapped to RAM, vectors base will be 0x00000000\n");
        }

        return base;
}
#endif /* CONFIG_CPU_HIGH_VECTOR */
#endif /* CONFIG_CPU_CP15 */

void __init arm_mm_memblock_reserve(void)
{
#ifndef CONFIG_CPU_V7M
        vectors_base = IS_ENABLED(CONFIG_CPU_CP15) ? setup_vectors_base() : 0;
        /*
         * Reserve the exception vector page. On platforms where the
         * vectors live in DRAM this keeps the page allocator away from
         * it; otherwise alloc_page() could hand out page 0, which is
         * indistinguishable from an allocation failure.
         */
        memblock_reserve(vectors_base, 2 * PAGE_SIZE);
#else /* ifndef CONFIG_CPU_V7M */
        /*
         * There is no dedicated vector page on V7-M. So nothing needs to be
         * reserved here.
         */
#endif
        /*
         * In any case, always ensure address 0 is never used as many things
         * get very confused if 0 is returned as a legitimate address.
         */
        memblock_reserve(0, 1);
}
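
/*
 * Editorial note: both MPU helpers below dispatch on the PMSA version the
 * CPU advertises in ID_MMFR0 (the field selected by MMFR0_PMSA): PMSAv7 is
 * the v7-R/v7-M style MPU, PMSAv8 the v8-R/v8-M style one. Any other value
 * means no supported MPU is present and the helpers deliberately do nothing.
 */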

static void __init adjust_lowmem_bounds_mpu(void)
{
        unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;

        switch (pmsa) {
        case MMFR0_PMSAv7:
                pmsav7_adjust_lowmem_bounds();
                break;
        case MMFR0_PMSAv8:
                pmsav8_adjust_lowmem_bounds();
                break;
        default:
                break;
        }
}

static void __init mpu_setup(void)
{
        unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;

        switch (pmsa) {
        case MMFR0_PMSAv7:
                pmsav7_setup();
                break;
        case MMFR0_PMSAv8:
                pmsav8_setup();
                break;
        default:
                break;
        }
}

void __init adjust_lowmem_bounds(void)
{
        phys_addr_t end;

        adjust_lowmem_bounds_mpu();
        end = memblock_end_of_DRAM();
        high_memory = __va(end - 1) + 1;
        memblock_set_current_limit(end);
}

/*
 * paging_init() has no page tables to set up on !MMU: it installs the
 * exception vectors, configures the MPU (when one is present) and
 * initialises the zone memory maps.
 */
void __init paging_init(const struct machine_desc *mdesc)
{
        early_trap_init((void *)vectors_base);
        mpu_setup();
        bootmem_init();
}

/*
 * We don't need to do anything here for nommu machines.
 */
void setup_mm_for_reboot(void)
{
}

/* Without an MMU there are no aliases: the kernel mapping is the page */
void flush_dcache_page(struct page *page)
{
        __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_kernel_dcache_page(struct page *page)
{
        __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long uaddr, void *dst, const void *src,
                       unsigned long len)
{
        memcpy(dst, src, len);
        /* Executable mappings need I/D coherency after the write */
        if (vma->vm_flags & VM_EXEC)
                __cpuc_coherent_user_range(uaddr, uaddr + len);
}

void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
                                size_t size, unsigned int mtype)
{
        /* Identity mapped: anything above 4GiB cannot be reached */
        if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
                return NULL;
        return (void __iomem *)(offset + (pfn << PAGE_SHIFT));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
                                   unsigned int mtype, void *caller)
{
        return (void __iomem *)phys_addr;
}

/* Platforms may install their own ioremap hook here */
void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
        return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
                                    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
        __alias(ioremap_cached);

void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
        return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
                                    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
EXPORT_SYMBOL(ioremap_cached);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
        return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
                                    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
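
/*
 * Editorial note: on !MMU every ioremap variant above collapses to the
 * same identity mapping, since __arm_ioremap_caller() ignores both size
 * and mtype and returns the physical address as the cookie. Cacheability
 * is therefore governed by the MPU regions (or the default memory map),
 * not by which ioremap_*() flavour a driver picked.
 */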

#ifdef CONFIG_PCI

#include <asm/mach/map.h>

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
        /*
         * arch_ioremap_caller is only populated when a platform installs
         * its own hook; fall back to the identity mapping rather than
         * jumping through a NULL pointer.
         */
        if (arch_ioremap_caller)
                return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
                                           __builtin_return_address(0));
        return __arm_ioremap_caller(res_cookie, size, MT_UNCACHED,
                                    __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
        return (void *)phys_addr;
}

/* Mappings are never created, so there is nothing to tear down */
void __iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(__iounmap);

void (*arch_iounmap)(volatile void __iomem *);

void iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(iounmap);
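
/*
 * Usage sketch (illustrative only; DEV_BASE and CTRL_OFF are hypothetical
 * names, not part of this file). Drivers keep the exact sequence they
 * would use on an MMU kernel; here it resolves to an identity mapping
 * and a no-op unmap:
 *
 *	void __iomem *regs = ioremap(DEV_BASE, SZ_4K);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_OFF);
 *	iounmap(regs);	// no-op on !MMU, kept for portability
 */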