/*
 *  linux/arch/arm/kernel/head.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Kernel startup code for all 32-bit CPUs
 */
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/system.h>

#if (PHYS_OFFSET & 0x001fffff)
#error "PHYS_OFFSET must be at an even 2MiB boundary!"
#endif

#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
#define KERNEL_RAM_PADDR	(PHYS_OFFSET + TEXT_OFFSET)

#define ATAG_CORE	0x54410001
#define ATAG_CORE_SIZE	((2*4 + 3*4) >> 2)


/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif

	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000

	.macro	pgtbl, rd
	ldr	\rd, =(KERNEL_RAM_PADDR - 0x4000)
	.endm
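
/*
 * The level 1 page table consists of 4096 entries of 4 bytes each,
 * one 1MB section per entry, i.e. 16K in total - hence the 0x4000
 * used above.  With the usual TEXT_OFFSET of 0x8000 this places
 * swapper_pg_dir at PAGE_OFFSET + 0x4000, immediately below the
 * kernel image.
 */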

#ifdef CONFIG_XIP_KERNEL
#define KERNEL_START	XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
#define KERNEL_END	_edata_loc
#else
#define KERNEL_START	KERNEL_RAM_VADDR
#define KERNEL_END	_end
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
 * r1 = machine nr, r2 = atags pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
	.section ".text.head", "ax"
	.type	stext, %function
ENTRY(stext)
	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
						@ and irqs disabled
	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
	beq	__error_p			@ yes, error 'p'
	bl	__lookup_machine_type		@ r5=machinfo
	movs	r8, r5				@ invalid machine (r5=0)?
	beq	__error_a			@ yes, error 'a'
	bl	__vet_atags
	bl	__create_page_tables

	/*
	 * The following calls CPU specific code in a position independent
	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
	 * xxx_proc_info structure selected by __lookup_machine_type
	 * above.  On return, the CPU will be ready for the MMU to be
	 * turned on, and r0 will hold the CPU control register value.
	 */
	ldr	r13, __switch_data		@ address to jump to after
						@ mmu has been enabled
	adr	lr, __enable_mmu		@ return (PIC) address
	add	pc, r10, #PROCINFO_INITFUNC

#if defined(CONFIG_SMP)
	.type	secondary_startup, #function
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */
	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type
	movs	r10, r5				@ invalid processor?
	moveq	r0, #'p'			@ yes, error 'p'
	beq	__error

	/*
	 * Use the page tables supplied from __cpu_up.
	 */
	adr	r4, __secondary_data
	ldmia	r4, {r5, r7, r13}		@ address to jump to after
	sub	r4, r4, r5			@ mmu has been enabled
	ldr	r4, [r7, r4]			@ get secondary_data.pgdir
	adr	lr, __enable_mmu		@ return address
	add	pc, r10, #PROCINFO_INITFUNC	@ initialise processor
						@ (return control reg)

	/*
	 * r6  = &secondary_data
	 */
ENTRY(__secondary_switched)
	ldr	sp, [r7, #4]			@ get secondary_data.stack
	mov	fp, #0
	b	secondary_start_kernel

	.type	__secondary_data, %object
__secondary_data:
	.long	.
	.long	secondary_data
	.long	__secondary_switched
#endif /* defined(CONFIG_SMP) */



/*
 * Setup common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.
 */
	.type	__enable_mmu, %function
__enable_mmu:
#ifdef CONFIG_ALIGNMENT_TRAP
	orr	r0, r0, #CR_A
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
	b	__turn_mmu_on

/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 *  r0  = cp#15 control register
 *  r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
	.align	5
	.type	__turn_mmu_on, %function
__turn_mmu_on:
	mov	r0, r0
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
	mov	r3, r3
	mov	r3, r3
	mov	pc, r13
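
/*
 * A note on the index arithmetic used by __create_page_tables below:
 * each level 1 entry maps a 1MB section, so the entry index for an
 * address is (addr >> 20) and its byte offset within the table is
 * (addr >> 20) * 4 = addr >> 18.  The base constants are split into
 * 0xff000000 and 0x00f00000 parts because an ARM data-processing
 * immediate is limited to an 8-bit value rotated by an even amount.
 */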

/*
 * Setup the initial page tables.  We only setup the barest
 * amount which are required to get the kernel running, which
 * generally means mapping in the kernel code.
 *
 * r8  = machinfo
 * r9  = cpuid
 * r10 = procinfo
 *
 * Returns:
 *  r0, r3, r6, r7 corrupted
 *  r4 = physical page table address
 */
	.type	__create_page_tables, %function
__create_page_tables:
	pgtbl	r4				@ page table address

	/*
	 * Clear the 16K level 1 swapper page table
	 */
	mov	r0, r4
	mov	r3, #0
	add	r6, r0, #0x4000
1:	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b

	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

	/*
	 * Create identity mapping for first MB of kernel to
	 * cater for the MMU enable.  This identity mapping
	 * will be removed by paging_init().  We use our current program
	 * counter to determine corresponding section base address.
	 */
	mov	r6, pc, lsr #20			@ start of kernel section
	orr	r3, r7, r6, lsl #20		@ flags + kernel base
	str	r3, [r4, r6, lsl #2]		@ identity mapping

	/*
	 * Now setup the pagetables for our kernel direct
	 * mapped region.
	 */
	add	r0, r4, #(KERNEL_START & 0xff000000) >> 18
	str	r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]!
	ldr	r6, =(KERNEL_END - 1)
	add	r0, r0, #4
	add	r6, r4, r6, lsr #18
1:	cmp	r0, r6
	add	r3, r3, #1 << 20
	strls	r3, [r0], #4
	bls	1b

#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map some ram to cover our .data and .bss areas.
	 */
	orr	r3, r7, #(KERNEL_RAM_PADDR & 0xff000000)
	.if	(KERNEL_RAM_PADDR & 0x00f00000)
	orr	r3, r3, #(KERNEL_RAM_PADDR & 0x00f00000)
	.endif
	add	r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> 18
	str	r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> 18]!
	ldr	r6, =(_end - 1)
	add	r0, r0, #4
	add	r6, r4, r6, lsr #18
1:	cmp	r0, r6
	add	r3, r3, #1 << 20
	strls	r3, [r0], #4
	bls	1b
#endif

	/*
	 * Then map first 1MB of ram in case it contains our boot params.
	 */
	add	r0, r4, #PAGE_OFFSET >> 18
	orr	r6, r7, #(PHYS_OFFSET & 0xff000000)
	.if	(PHYS_OFFSET & 0x00f00000)
	orr	r6, r6, #(PHYS_OFFSET & 0x00f00000)
	.endif
	str	r6, [r0]

#ifdef CONFIG_DEBUG_LL
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	ldr	r3, [r8, #MACHINFO_PGOFFIO]
	add	r0, r4, r3
	rsb	r3, r3, #0x4000			@ PTRS_PER_PGD*sizeof(long)
	cmp	r3, #0x0800			@ limit to 512MB
	movhi	r3, #0x0800
	add	r6, r0, r3
	ldr	r3, [r8, #MACHINFO_PHYSIO]
	orr	r3, r3, r7
1:	str	r3, [r0], #4
	add	r3, r3, #1 << 20
	teq	r0, r6
	bne	1b
#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
	/*
	 * If we're using the NetWinder or CATS, we also need to map
	 * in the 16550-type serial port for the debug messages
	 */
	add	r0, r4, #0xff000000 >> 18
	orr	r3, r7, #0x7c000000
	str	r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug.  This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> 18
	orr	r3, r7, #0x02000000
	str	r3, [r0]
	add	r0, r4, #0xd8000000 >> 18
	str	r3, [r0]
#endif
#endif
	mov	pc, lr
	.ltorg

#include "head-common.S"