/*
 *  linux/arch/arm/kernel/head.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Kernel startup code for all 32-bit CPUs
 */
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/system.h>

/*
 * The section-mapping code in __create_page_tables works in 1MiB
 * sections, and the kernel is additionally required to sit at an
 * even-2MiB physical boundary.
 */
#if (PHYS_OFFSET & 0x001fffff)
#error "PHYS_OFFSET must be at an even 2MiB boundary!"
#endif

/* Virtual and physical addresses of the start of the kernel image. */
#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
#define KERNEL_RAM_PADDR	(PHYS_OFFSET + TEXT_OFFSET)


/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif

	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000

	/*
	 * pgtbl: load \rd with the *physical* address of the initial
	 * page table (the 16K region immediately below the kernel image).
	 */
	.macro	pgtbl, rd
	ldr	\rd, =(KERNEL_RAM_PADDR - 0x4000)
	.endm

#ifdef CONFIG_XIP_KERNEL
#define KERNEL_START	XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
#define KERNEL_END	_edata_loc
#else
#define KERNEL_START	KERNEL_RAM_VADDR
#define KERNEL_END	_end
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
 * r1 = machine nr, r2 = atags pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
	.section ".text.head", "ax"
ENTRY(stext)
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
						@ and irqs disabled
	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
	beq	__error_p			@ yes, error 'p'
	bl	__lookup_machine_type		@ r5=machinfo
	movs	r8, r5				@ invalid machine (r5=0)?
	beq	__error_a			@ yes, error 'a'
	bl	__vet_atags			@ sanity-check the r2 atags
						@ pointer (helper lives in
						@ head-common.S, included below)
	bl	__create_page_tables		@ r4 = phys page table on return

	/*
	 * The following calls CPU specific code in a position independent
	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
	 * xxx_proc_info structure selected by __lookup_machine_type
	 * above.  On return, the CPU will be ready for the MMU to be
	 * turned on, and r0 will hold the CPU control register value.
	 */
	ldr	r13, __switch_data		@ address to jump to after
						@ mmu has been enabled
	adr	lr, BSYM(__enable_mmu)		@ return (PIC) address
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
 THUMB(	mov	pc, r12				)
ENDPROC(stext)

#if defined(CONFIG_SMP)
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type
	movs	r10, r5				@ invalid processor?
	moveq	r0, #'p'			@ yes, error 'p'
	beq	__error

	/*
	 * Use the page tables supplied from  __cpu_up.
	 *
	 * The MMU is still off, so convert the linked (virtual) address
	 * of secondary_data to a physical one: r4 - r5 is the runtime
	 * phys-virt delta (".long ." below holds the link-time address
	 * of __secondary_data, adr yields its physical address).
	 */
	adr	r4, __secondary_data
	ldmia	r4, {r5, r7, r12}		@ address to jump to after
	sub	r4, r4, r5			@ mmu has been enabled
	ldr	r4, [r7, r4]			@ get secondary_data.pgdir
	adr	lr, BSYM(__enable_mmu)		@ return address
	mov	r13, r12			@ __secondary_switched address
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	) @ initialise processor
						  @ (return control reg)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
 THUMB(	mov	pc, r12				)
ENDPROC(secondary_startup)

	/*
	 * r7 = &secondary_data (virtual; loaded by the ldmia above --
	 * valid here since the MMU is now on)
	 */
ENTRY(__secondary_switched)
	ldr	sp, [r7, #4]			@ get secondary_data.stack
	mov	fp, #0				@ terminate backtraces here
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

	.type	__secondary_data, %object
__secondary_data:
	.long	.				@ link-time address of this word
	.long	secondary_data
	.long	__secondary_switched
#endif /* defined(CONFIG_SMP) */



/*
 * Setup common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.
 */
__enable_mmu:
		/* r0 = control register value from the proc init function;
		 * adjust the cache/branch-predictor/alignment bits per config
		 * before it is written by __turn_mmu_on. */
#ifdef CONFIG_ALIGNMENT_TRAP
	orr	r0, r0, #CR_A			@ enable alignment fault checking
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C			@ keep D-cache off
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z			@ keep branch prediction off
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I			@ keep I-cache off
#endif
	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
	b	__turn_mmu_on
ENDPROC(__enable_mmu)

/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 * r0  = cp#15 control register
 * r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
	.align	5				@ 32-byte align: presumably so
						@ the enable sequence sits in
						@ one cache line -- NOTE(review)
__turn_mmu_on:
	mov	r0, r0				@ nop (pipeline padding)
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg (MMU on)
	mrc	p15, 0, r3, c0, c0, 0		@ read id reg (serialising read)
	mov	r3, r3				@ nop (pipeline padding)
	mov	r3, r13
	mov	pc, r3				@ jump to virtual address
ENDPROC(__turn_mmu_on)


/*
 * Setup the initial page tables.  We only setup the barest
 * amount which are required to get the kernel running, which
 * generally means mapping in the kernel code.
 *
 * r8  = machinfo
 * r9  = cpuid
 * r10 = procinfo
 *
 * Returns:
 *  r0, r3, r6, r7 corrupted
 *  r4 = physical page table address
 */
__create_page_tables:
	pgtbl	r4				@ page table address

	/*
	 * Clear the 16K level 1 swapper page table
	 * (four words per iteration until r0 reaches r4 + 0x4000)
	 */
	mov	r0, r4
	mov	r3, #0
	add	r6, r0, #0x4000
1:	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b

	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

	/*
	 * Create identity mapping for first MB of kernel to
	 * cater for the MMU enable.  This identity mapping
	 * will be removed by paging_init().  We use our current program
	 * counter to determine corresponding section base address.
	 */
	mov	r6, pc
	mov	r6, r6, lsr #20			@ start of kernel section
	orr	r3, r7, r6, lsl #20		@ flags + kernel base
	str	r3, [r4, r6, lsl #2]		@ identity mapping

	/*
	 * Now setup the pagetables for our kernel direct
	 * mapped region.  (>> 18 == section index << 2, i.e. the
	 * byte offset of the section entry in the level 1 table.)
	 */
	add	r0, r4, #(KERNEL_START & 0xff000000) >> 18
	str	r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]!
	ldr	r6, =(KERNEL_END - 1)
	add	r0, r0, #4
	add	r6, r4, r6, lsr #18		@ entry covering KERNEL_END - 1
1:	cmp	r0, r6
	add	r3, r3, #1 << 20		@ next 1MB section
	strls	r3, [r0], #4
	bls	1b

#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map some ram to cover our .data and .bss areas.
	 * (With XIP the loop above mapped flash, not RAM.)
	 */
	orr	r3, r7, #(KERNEL_RAM_PADDR & 0xff000000)
	.if	(KERNEL_RAM_PADDR & 0x00f00000)
	orr	r3, r3, #(KERNEL_RAM_PADDR & 0x00f00000)
	.endif
	add	r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> 18
	str	r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> 18]!
	ldr	r6, =(_end - 1)
	add	r0, r0, #4
	add	r6, r4, r6, lsr #18
1:	cmp	r0, r6
	add	r3, r3, #1 << 20
	strls	r3, [r0], #4
	bls	1b
#endif

	/*
	 * Then map first 1MB of ram in case it contains our boot params.
	 */
	add	r0, r4, #PAGE_OFFSET >> 18
	orr	r6, r7, #(PHYS_OFFSET & 0xff000000)
	.if	(PHYS_OFFSET & 0x00f00000)
	orr	r6, r6, #(PHYS_OFFSET & 0x00f00000)
	.endif
	str	r6, [r0]

#ifdef CONFIG_DEBUG_LL
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	ldr	r3, [r8, #MACHINFO_PGOFFIO]	@ byte offset of first IO entry
	add	r0, r4, r3
	rsb	r3, r3, #0x4000			@ PTRS_PER_PGD*sizeof(long)
	cmp	r3, #0x0800			@ limit to 512MB
	movhi	r3, #0x0800
	add	r6, r0, r3			@ end of IO entries
	ldr	r3, [r8, #MACHINFO_PHYSIO]
	orr	r3, r3, r7			@ phys IO base + io_mmuflags
1:	str	r3, [r0], #4
	add	r3, r3, #1 << 20
	teq	r0, r6
	bne	1b
#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
	/*
	 * If we're using the NetWinder or CATS, we also need to map
	 * in the 16550-type serial port for the debug messages
	 */
	add	r0, r4, #0xff000000 >> 18	@ entry for virt 0xff000000
	orr	r3, r7, #0x7c000000		@ phys 0x7c000000 section
	str	r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug.  This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> 18
	orr	r3, r7, #0x02000000
	str	r3, [r0]
	add	r0, r4, #0xd8000000 >> 18	@ second (alias) mapping
	str	r3, [r0]
#endif
#endif
	mov	pc, lr
ENDPROC(__create_page_tables)
	.ltorg

#include "head-common.S"