/*
 *  linux/arch/arm/kernel/head.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Kernel startup code for all 32-bit CPUs
 */
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/cp15.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/pgtable.h>

#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif

/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif

#ifdef CONFIG_ARM_LPAE
	/* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE	0x5000
#define PMD_ORDER	3
#else
#define PG_DIR_SIZE	0x4000
#define PMD_ORDER	2
#endif

	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE

	.macro	pgtbl, rd, phys
	add	\rd, \phys, #TEXT_OFFSET
	sub	\rd, \rd, #PG_DIR_SIZE
	.endm

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = don't care, r0 = 0,
 * r1 = machine nr, r2 = atags or dtb pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
	.arm

	__HEAD
ENTRY(stext)
 ARM_BE8(setend	be )			@ ensure we are in BE8 mode

 THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is always entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install
#endif
	@ ensure svc mode and all interrupts masked
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
 THUMB(	it	eq )			@ force fixup-able long branch encoding
	beq	__error_p			@ yes, error 'p'

#ifdef CONFIG_ARM_LPAE
	mrc	p15, 0, r3, c0, c1, 4		@ read ID_MMFR0
	and	r3, r3, #0xf			@ extract VMSA support
	cmp	r3, #5				@ long-descriptor translation table format?
 THUMB(	it	lo )			@ force fixup-able long branch encoding
	blo	__error_p			@ only classic page table format
#endif

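	/*
	 * The non-XIP path below works out PHYS_OFFSET at run time from
	 * the "2:" literal pool: adr gives the physical (running) address
	 * of the pool, while the pool itself holds its link-time virtual
	 * address and PAGE_OFFSET.  A worked example, assuming for
	 * illustration PAGE_OFFSET = 0xc0000000 and RAM starting at
	 * physical 0x80000000:
	 *
	 *	r3 = physical address of 2:		(e.g. 0x800081xx)
	 *	r4 = r3 - virtual address of 2:		= 0xc0000000 (mod 2^32)
	 *	r8 = PAGE_OFFSET + r4			= 0x80000000 = PHYS_OFFSET
	 */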
#ifndef CONFIG_XIP_KERNEL
	adr	r3, 2f
	ldmia	r3, {r4, r8}
	sub	r4, r3, r4			@ (PHYS_OFFSET - PAGE_OFFSET)
	add	r8, r8, r4			@ PHYS_OFFSET
#else
	ldr	r8, =PLAT_PHYS_OFFSET		@ always constant in this case
#endif

	/*
	 * r1 = machine no, r2 = atags or dtb,
	 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
	 */
	bl	__vet_atags
#ifdef CONFIG_SMP_ON_UP
	bl	__fixup_smp
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
	bl	__fixup_pv_table
#endif
	bl	__create_page_tables

	/*
	 * The following calls CPU specific code in a position independent
	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
	 * xxx_proc_info structure selected by __lookup_processor_type
	 * above.  On return, the CPU will be ready for the MMU to be
	 * turned on, and r0 will hold the CPU control register value.
	 */
	ldr	r13, =__mmap_switched		@ address to jump to after
						@ mmu has been enabled
	adr	lr, BSYM(1f)			@ return (PIC) address
	mov	r8, r4				@ set TTBR1 to swapper_pg_dir
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
 THUMB(	mov	pc, r12				)
1:	b	__enable_mmu
ENDPROC(stext)
	.ltorg
#ifndef CONFIG_XIP_KERNEL
2:	.long	.
	.long	PAGE_OFFSET
#endif

/*
 * Set up the initial page tables.  We only set up the bare minimum
 * required to get the kernel running, which generally means mapping
 * in the kernel code.
 *
 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
 *  r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h)
 */
__create_page_tables:
	pgtbl	r4, r8				@ page table address

	/*
	 * Clear the swapper page table
	 */
	mov	r0, r4
	mov	r3, #0
	add	r6, r0, #PG_DIR_SIZE
1:	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b

#ifdef CONFIG_ARM_LPAE
	/*
	 * Build the PGD table (first level) to point to the PMD table. A PGD
	 * entry is 64-bit wide.
	 */
	mov	r0, r4
	add	r3, r4, #0x1000			@ first PMD table address
	orr	r3, r3, #3			@ PGD block type
	mov	r6, #4				@ PTRS_PER_PGD
	mov	r7, #1 << (55 - 32)		@ L_PGD_SWAPPER
1:
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4			@ set top PGD entry bits
	str	r3, [r0], #4			@ set bottom PGD entry bits
#else
	str	r3, [r0], #4			@ set bottom PGD entry bits
	str	r7, [r0], #4			@ set top PGD entry bits
#endif
	add	r3, r3, #0x1000			@ next PMD table
	subs	r6, r6, #1
	bne	1b

	add	r4, r4, #0x1000			@ point to the PMD tables
#ifdef CONFIG_CPU_ENDIAN_BE8
	add	r4, r4, #4			@ we only write the bottom word
#endif
#endif

	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

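	/*
	 * The mappings below are section mappings: an address is turned
	 * into a table slot with (addr >> SECTION_SHIFT) << PMD_ORDER.
	 * A worked example, assuming for illustration the classic 2-level
	 * format (SECTION_SHIFT = 20, PMD_ORDER = 2, i.e. 1MiB sections
	 * and 4-byte entries) and __turn_mmu_on sitting at physical
	 * 0x80100000:
	 *
	 *	section index	= 0x80100000 >> 20	= 0x801
	 *	table offset	= 0x801 << 2		= 0x2004
	 *	entry value	= 0x80100000 | mm_mmuflags
	 *
	 * i.e. the identity map below writes that entry at r4 + 0x2004 so
	 * that VA == PA for the code which turns the MMU on.
	 */
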
	/*
	 * Create identity mapping to cater for __enable_mmu.
	 * This identity mapping will be removed by paging_init().
	 */
	adr	r0, __turn_mmu_on_loc
	ldmia	r0, {r3, r5, r6}
	sub	r0, r0, r3			@ virt->phys offset
	add	r5, r5, r0			@ phys __turn_mmu_on
	add	r6, r6, r0			@ phys __turn_mmu_on_end
	mov	r5, r5, lsr #SECTION_SHIFT
	mov	r6, r6, lsr #SECTION_SHIFT

1:	orr	r3, r7, r5, lsl #SECTION_SHIFT	@ flags + kernel base
	str	r3, [r4, r5, lsl #PMD_ORDER]	@ identity mapping
	cmp	r5, r6
	addlo	r5, r5, #1			@ next section
	blo	1b

	/*
	 * Map our RAM from the start to the end of the kernel .bss section.
	 */
	add	r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
	ldr	r6, =(_end - 1)
	orr	r3, r8, r7
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:	str	r3, [r0], #1 << PMD_ORDER
	add	r3, r3, #1 << SECTION_SHIFT
	cmp	r0, r6
	bls	1b

#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map the kernel image separately as it is not located in RAM.
	 */
#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
	mov	r3, pc
	mov	r3, r3, lsr #SECTION_SHIFT
	orr	r3, r7, r3, lsl #SECTION_SHIFT
	add	r0, r4,  #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
	ldr	r6, =(_edata_loc - 1)
	add	r0, r0, #1 << PMD_ORDER
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:	cmp	r0, r6
	add	r3, r3, #1 << SECTION_SHIFT
	strls	r3, [r0], #1 << PMD_ORDER
	bls	1b
#endif

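	/*
	 * A worked example for the boot params mapping that follows,
	 * assuming for illustration a DTB passed at physical 0x8f000000
	 * with PHYS_OFFSET = 0x80000000, PAGE_OFFSET = 0xc0000000 and the
	 * classic format (SECTION_SHIFT = 20, PMD_ORDER = 2):
	 *
	 *	r0 = section base of r2			= 0x8f000000
	 *	r3 = r0 - PHYS_OFFSET + PAGE_OFFSET	= 0xcf000000
	 *	slot = r4 + (0xcf000000 >> 18)		= r4 + 0x33c0
	 *
	 * Two consecutive section entries (covering 0x8f000000 and
	 * 0x8f100000) are written there, so the DTB remains mapped even if
	 * it straddles a section boundary.
	 */
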
	/*
	 * Then map boot params address in r2 if specified.
	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
	 */
	mov	r0, r2, lsr #SECTION_SHIFT
	movs	r0, r0, lsl #SECTION_SHIFT
	subne	r3, r0, r8
	addne	r3, r3, #PAGE_OFFSET
	addne	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
	orrne	r6, r7, r0
	strne	r6, [r3], #1 << PMD_ORDER
	addne	r6, r6, #1 << SECTION_SHIFT
	strne	r6, [r3]

#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
	sub	r4, r4, #4			@ Fixup page table pointer
						@ for 64-bit descriptors
#endif

#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	addruart r7, r3, r0

	mov	r3, r3, lsr #SECTION_SHIFT
	mov	r3, r3, lsl #PMD_ORDER

	add	r0, r4, r3
	mov	r3, r7, lsr #SECTION_SHIFT
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
	orr	r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
	mov	r7, #1 << (54 - 32)		@ XN
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4
	str	r3, [r0], #4
#else
	str	r3, [r0], #4
	str	r7, [r0], #4
#endif
#else
	orr	r3, r3, #PMD_SECT_XN
	str	r3, [r0], #4
#endif

#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
	/* we don't need any serial debugging mappings */
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif

#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
	/*
	 * If we're using the NetWinder or CATS, we also need to map
	 * in the 16550-type serial port for the debug messages
	 */
	add	r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x7c000000
	str	r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug.  This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x02000000
	str	r3, [r0]
	add	r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0]
#endif
#endif
#ifdef CONFIG_ARM_LPAE
	sub	r4, r4, #0x1000		@ point to the PGD table
	mov	r4, r4, lsr #ARCH_PGD_SHIFT
#endif
	mov	pc, lr
ENDPROC(__create_page_tables)
	.ltorg
	.align
__turn_mmu_on_loc:
	.long	.
	.long	__turn_mmu_on
	.long	__turn_mmu_on_end

#if defined(CONFIG_SMP)
	.text
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Look up
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */

 ARM_BE8(setend	be)				@ ensure we are in BE8 mode

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install_secondary
#endif
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type
	movs	r10, r5				@ invalid processor?
	moveq	r0, #'p'			@ yes, error 'p'
 THUMB(	it	eq )		@ force fixup-able long branch encoding
	beq	__error_p

	/*
	 * Use the page tables supplied from __cpu_up.
	 */
	adr	r4, __secondary_data
	ldmia	r4, {r5, r7, r12}		@ address to jump to after
	sub	lr, r4, r5			@ mmu has been enabled
	ldr	r4, [r7, lr]			@ get secondary_data.pgdir
	add	r7, r7, #4
	ldr	r8, [r7, lr]			@ get secondary_data.swapper_pg_dir
	adr	lr, BSYM(__enable_mmu)		@ return address
	mov	r13, r12			@ __secondary_switched address
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	) @ initialise processor
						  @ (return control reg)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
 THUMB(	mov	pc, r12				)
ENDPROC(secondary_startup)

	/*
	 * r7 = &secondary_data + 4 (set up in secondary_startup above)
	 */
ENTRY(__secondary_switched)
	ldr	sp, [r7, #4]			@ get secondary_data.stack
	mov	fp, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

	.align

	.type	__secondary_data, %object
__secondary_data:
	.long	.
	.long	secondary_data
	.long	__secondary_switched
#endif /* defined(CONFIG_SMP) */

/*
 * Set up common bits before finally enabling the MMU.  Essentially this
 * is just loading the page table pointer and domain access registers.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r4  = page table (see ARCH_PGD_SHIFT in asm/memory.h)
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 */
__enable_mmu:
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
	orr	r0, r0, #CR_A
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
#ifndef CONFIG_ARM_LPAE
	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
#endif
	b	__turn_mmu_on
ENDPROC(__enable_mmu)

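/*
 * A note on __turn_mmu_on below: it lives in the .idmap.text section,
 * which __create_page_tables covered with an identity (VA == PA)
 * mapping, because the pc still holds a physical address at the moment
 * the MMU is switched on.  The mov r0, r0 / mov r3, r3 instructions are
 * deliberate nops around the control register write, and instr_sync
 * (an ISB, or its closest pre-v7 equivalent) provides the necessary
 * synchronisation before the jump to the virtual address held in r13.
 */
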
/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(__turn_mmu_on)
	mov	r0, r0
	instr_sync
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
	instr_sync
	mov	r3, r3
	mov	r3, r13
	mov	pc, r3
__turn_mmu_on_end:
ENDPROC(__turn_mmu_on)
	.popsection

#ifdef CONFIG_SMP_ON_UP
	__INIT
__fixup_smp:
	and	r3, r9, #0x000f0000	@ architecture version
	teq	r3, #0x000f0000		@ CPU ID supported?
	bne	__fixup_smp_on_up	@ no, assume UP

	bic	r3, r9, #0x00ff0000
	bic	r3, r3, #0x0000000f	@ mask 0xff00fff0
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000b000
	orr	r4, r4, #0x00000020	@ val 0x4100b020
	teq	r3, r4			@ ARM 11MPCore?
	moveq	pc, lr			@ yes, assume SMP

	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
	and	r0, r0, #0xc0000000	@ multiprocessing extensions and
	teq	r0, #0x80000000		@ not part of a uniprocessor system?
	bne	__fixup_smp_on_up	@ no, assume UP

	@ Core indicates it is SMP. Check for Aegis SOC where a single
	@ Cortex-A9 CPU is present but SMP operations fault.
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000c000
	orr	r4, r4, #0x00000090
	teq	r3, r4			@ Check for ARM Cortex-A9
	movne	pc, lr			@ Not ARM Cortex-A9,

	@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
	@ below address check will need to be #ifdef'd or equivalent
	@ for the Aegis platform.
	mrc	p15, 4, r0, c15, c0	@ get SCU base address
	teq	r0, #0x0		@ '0' on actual UP A9 hardware
	beq	__fixup_smp_on_up	@ So it's an A9 UP
	ldr	r0, [r0, #4]		@ read SCU Config
ARM_BE8(rev	r0, r0)			@ byteswap if big endian
	and	r0, r0, #0x3		@ number of CPUs
	teq	r0, #0x0		@ is 1?
	movne	pc, lr

__fixup_smp_on_up:
	adr	r0, 1f
	ldmia	r0, {r3 - r5}
	sub	r3, r0, r3
	add	r4, r4, r3
	add	r5, r5, r3
	b	__do_fixup_smp_on_up
ENDPROC(__fixup_smp)

	.align
1:	.word	.
	.word	__smpalt_begin
	.word	__smpalt_end

	.pushsection .data
	.globl	smp_on_up
smp_on_up:
	ALT_SMP(.long	1)
	ALT_UP(.long	0)
	.popsection
#endif

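/*
 * __do_fixup_smp_on_up walks a table of (address, instruction) pairs
 * between r4 and r5 (the entries emitted into .alt.smp.init by the
 * ALT_SMP()/ALT_UP() macros in asm/assembler.h), with r3 holding the
 * offset to add to each recorded address.  Each SMP-only instruction is
 * simply overwritten with its uniprocessor replacement; the Thumb-2
 * variant stores the 32-bit opcode as two halfwords in case the target
 * is only halfword aligned.
 */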
	.text
__do_fixup_smp_on_up:
	cmp	r4, r5
	movhs	pc, lr
	ldmia	r4!, {r0, r6}
 ARM(	str	r6, [r0, r3]	)
 THUMB(	add	r0, r0, r3	)
#ifdef __ARMEB__
 THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
#endif
 THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
 THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
 THUMB(	strh	r6, [r0]	)
	b	__do_fixup_smp_on_up
ENDPROC(__do_fixup_smp_on_up)

ENTRY(fixup_smp)
	stmfd	sp!, {r4 - r6, lr}
	mov	r4, r0
	add	r5, r0, r1
	mov	r3, #0
	bl	__do_fixup_smp_on_up
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)

#ifdef __ARMEB__
#define LOW_OFFSET	0x4
#define HIGH_OFFSET	0x0
#else
#define LOW_OFFSET	0x0
#define HIGH_OFFSET	0x4
#endif

#ifdef CONFIG_ARM_PATCH_PHYS_VIRT

/* __fixup_pv_table - patch the stub instructions with the delta between
 * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and
 * can be expressed by an immediate shifter operand. The stub instruction
 * has a form of '(add|sub) rd, rn, #imm'.
 */
	__HEAD
__fixup_pv_table:
	adr	r0, 1f
	ldmia	r0, {r3-r7}
	mvn	ip, #0
	subs	r3, r0, r3	@ PHYS_OFFSET - PAGE_OFFSET
	add	r4, r4, r3	@ adjust table start address
	add	r5, r5, r3	@ adjust table end address
	add	r6, r6, r3	@ adjust __pv_phys_offset address
	add	r7, r7, r3	@ adjust __pv_offset address
	str	r8, [r6, #LOW_OFFSET]	@ save computed PHYS_OFFSET to __pv_phys_offset
	strcc	ip, [r7, #HIGH_OFFSET]	@ save to __pv_offset high bits
	mov	r6, r3, lsr #24	@ constant for add/sub instructions
	teq	r3, r6, lsl #24 @ must be 16MiB aligned
THUMB(	it	ne		@ cross section branch )
	bne	__error
	str	r3, [r7, #LOW_OFFSET]	@ save to __pv_offset low bits
	b	__fixup_a_pv_table
ENDPROC(__fixup_pv_table)

	.align
1:	.long	.
	.long	__pv_table_begin
	.long	__pv_table_end
2:	.long	__pv_phys_offset
	.long	__pv_offset

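/*
 * __fixup_a_pv_table rewrites each stub listed between r4 and r5.  In
 * the ARM case every stub carries an 8-bit rotated immediate: the
 * rotation field distinguishes the "bits 31-24" add/sub stubs, which
 * receive the delta shifted down by 24 (r6), from the "bits 7-0" mov
 * stubs used for the high word, which are turned into mvn (r0 supplies
 * bit 22) when the delta is negative.  The Thumb-2 case rebuilds the
 * split immediate fields of the 32-bit encoding instead.  A worked
 * example, assuming for illustration PHYS_OFFSET = 0x80000000 and
 * PAGE_OFFSET = 0xc0000000: __pv_offset = 0xffffffffc0000000, so
 * r6 = 0xc0, the low-word stubs have 0xc0000000 patched in, and the
 * high-word mov #0 becomes mvn #0, i.e. 0xffffffff.
 */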
	.text
__fixup_a_pv_table:
	adr	r0, 3f
	ldr	r6, [r0]
	add	r6, r6, r3
	ldr	r0, [r6, #HIGH_OFFSET]	@ pv_offset high word
	ldr	r6, [r6, #LOW_OFFSET]	@ pv_offset low word
	mov	r6, r6, lsr #24
	cmn	r0, #1
#ifdef CONFIG_THUMB2_KERNEL
	moveq	r0, #0x200000	@ set bit 21, mov to mvn instruction
	lsls	r6, #24
	beq	2f
	clz	r7, r6
	lsr	r6, #24
	lsl	r6, r7
	bic	r6, #0x0080
	lsrs	r7, #1
	orrcs	r6, #0x0080
	orr	r6, r6, r7, lsl #12
	orr	r6, #0x4000
	b	2f
1:	add	r7, r3
	ldrh	ip, [r7, #2]
ARM_BE8(rev16	ip, ip)
	tst	ip, #0x4000
	and	ip, #0x8f00
	orrne	ip, r6			@ mask in offset bits 31-24
	orreq	ip, r0			@ mask in offset bits 7-0
ARM_BE8(rev16	ip, ip)
	strh	ip, [r7, #2]
	bne	2f
	ldrh	ip, [r7]
ARM_BE8(rev16	ip, ip)
	bic	ip, #0x20
	orr	ip, ip, r0, lsr #16
ARM_BE8(rev16	ip, ip)
	strh	ip, [r7]
2:	cmp	r4, r5
	ldrcc	r7, [r4], #4	@ use branch for delay slot
	bcc	1b
	bx	lr
#else
#ifdef CONFIG_CPU_ENDIAN_BE8
	moveq	r0, #0x00004000	@ set bit 22, mov to mvn instruction
#else
	moveq	r0, #0x400000	@ set bit 22, mov to mvn instruction
#endif
	b	2f
1:	ldr	ip, [r7, r3]
#ifdef CONFIG_CPU_ENDIAN_BE8
	@ in BE8, we load data in BE, but instructions still in LE
	bic	ip, ip, #0xff000000
	tst	ip, #0x000f0000	@ check the rotation field
	orrne	ip, ip, r6, lsl #24	@ mask in offset bits 31-24
	biceq	ip, ip, #0x00004000	@ clear bit 22
	orreq	ip, ip, r0	@ mask in offset bits 7-0
#else
	bic	ip, ip, #0x000000ff
	tst	ip, #0xf00	@ check the rotation field
	orrne	ip, ip, r6	@ mask in offset bits 31-24
	biceq	ip, ip, #0x400000	@ clear bit 22
	orreq	ip, ip, r0	@ mask in offset bits 7-0
#endif
	str	ip, [r7, r3]
2:	cmp	r4, r5
	ldrcc	r7, [r4], #4	@ use branch for delay slot
	bcc	1b
	mov	pc, lr
#endif
ENDPROC(__fixup_a_pv_table)

	.align
3:	.long __pv_offset

ENTRY(fixup_pv_table)
	stmfd	sp!, {r4 - r7, lr}
	mov	r3, #0			@ no offset
	mov	r4, r0			@ r0 = table start
	add	r5, r0, r1		@ r1 = table size
	bl	__fixup_a_pv_table
	ldmfd	sp!, {r4 - r7, pc}
ENDPROC(fixup_pv_table)

	.data
	.globl	__pv_phys_offset
	.type	__pv_phys_offset, %object
__pv_phys_offset:
	.quad	0
	.size	__pv_phys_offset, . -__pv_phys_offset

	.globl	__pv_offset
	.type	__pv_offset, %object
__pv_offset:
	.quad	0
	.size	__pv_offset, . -__pv_offset
#endif

#include "head-common.S"