/*
 * linux/arch/arm/boot/compressed/head.S
 *
 * Copyright (C) 1996-2002 Russell King
 * Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

		.arch	armv7-a
/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
#else
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif

#else

#include CONFIG_DEBUG_LL_INCLUDE

		.macro	writeb,	ch, rb
		senduart \ch, \rb
		.endm

#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#else
		.macro	loadsp,	rb, tmp
		addruart \rb, \tmp
		.endm
#endif
#endif
#endif

		.macro	kputc,val
		mov	r0, \val
		bl	putc
		.endm

		.macro	kphex,val,len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm

		.macro	debug_reloc_start
#ifdef DEBUG
		kputc	#'\n'
		kphex	r6, 8		/* processor id */
		kputc	#':'
		kphex	r7, 8		/* architecture id */
#ifdef CONFIG_CPU_CP15
		kputc	#':'
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
#endif
		kputc	#'\n'
		kphex	r5, 8		/* decompressed kernel start */
		kputc	#'-'
		kphex	r9, 8		/* decompressed kernel end  */
		kputc	#'>'
		kphex	r4, 8		/* kernel execution address */
		kputc	#'\n'
#endif
		.endm

		.macro	debug_reloc_end
#ifdef DEBUG
		kphex	r5, 8		/* end of kernel */
		kputc	#'\n'
		mov	r0, r4
		bl	memdump		/* dump 256 bytes at start of kernel */
#endif
		.endm

		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
		.arm				@ Always enter in ARM state
start:
		.type	start,#function
		.rept	7
		mov	r0, r0
		.endr
   ARM(		mov	r0, r0		)
   ARM(		b	1f		)
 THUMB(		adr	r12, BSYM(1f)	)
 THUMB(		bx	r12		)

		.word	_magic_sig	@ Magic numbers to help the loader
		.word	_magic_start	@ absolute load/run zImage address
		.word	_magic_end	@ zImage end address
		.word	0x04030201	@ endianness flag

 THUMB(		.thumb			)
1:
 ARM_BE8(	setend	be )			@ go BE8 if compiled for BE8
		mrs	r9, cpsr
#ifdef CONFIG_ARM_VIRT_EXT
		bl	__hyp_stub_install	@ get into SVC mode, reversibly
#endif
		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer

		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
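		@ (Note: among the defined ARM mode encodings only USR,
		@ 0b10000, has both of the low two mode bits clear, so Z is
		@ set here only if we entered in user mode.)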
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
not_angel:
		safe_svcmode_maskall r0
		msr	spsr_cxsf, r9		@ Save the CPU boot mode in
						@ SPSR
		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */

		.text

#ifdef CONFIG_AUTO_ZRELADDR
		@ determine final kernel image address
		mov	r4, pc
		and	r4, r4, #0xf8000000
		add	r4, r4, #TEXT_OFFSET
#else
		ldr	r4, =zreladdr
#endif

		/*
		 * Set up a page table only if it won't overwrite ourself.
		 * That means r4 < pc && r4 - 16k page directory > &_end.
		 * Given that r4 > &_end is most infrequent, we add a rough
		 * additional 1MB of room for a possible appended DTB.
		 */
		mov	r0, pc
		cmp	r0, r4
		ldrcc	r0, LC0+32
		addcc	r0, r0, pc
		cmpcc	r4, r0
		orrcc	r4, r4, #1		@ remember we skipped cache_on
		blcs	cache_on

restart:	adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r6, r10, r11, r12}
		ldr	sp, [r0, #28]

		/*
		 * We might be running at a different address.  We need
		 * to fix up various pointers.
		 */
		sub	r0, r0, r1		@ calculate the delta offset
		add	r6, r6, r0		@ _edata
		add	r10, r10, r0		@ inflated kernel size location

		/*
		 * The kernel build system appends the size of the
		 * decompressed kernel at the end of the compressed data
		 * in little-endian form.
		 */
		ldrb	r9, [r10, #0]
		ldrb	lr, [r10, #1]
		orr	r9, r9, lr, lsl #8
		ldrb	lr, [r10, #2]
		ldrb	r10, [r10, #3]
		orr	r9, r9, lr, lsl #16
		orr	r9, r9, r10, lsl #24

#ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
		add	sp, sp, r0
		add	r10, sp, #0x10000
#else
		/*
		 * With ZBOOT_ROM the bss/stack is non-relocatable,
		 * but someone could still run this code from RAM,
		 * in which case our reference is _edata.
		 */
		mov	r10, r6
#endif

		mov	r5, #0			@ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
 * r0  = delta
 * r2  = BSS start
 * r3  = BSS end
 * r4  = final kernel address (possibly with LSB set)
 * r5  = appended dtb size (still unknown)
 * r6  = _edata
 * r7  = architecture ID
 * r8  = atags/device tree pointer
 * r9  = size of decompressed image
 * r10 = end of this image, including bss/stack/malloc space if non XIP
 * r11 = GOT start
 * r12 = GOT end
 * sp  = stack pointer
 *
 * if there are device trees (dtb) appended to zImage, advance r10 so that the
 * dtb data will get relocated along with the kernel if necessary.
 */

		ldr	lr, [r6, #0]
#ifndef __ARMEB__
		ldr	r1, =0xedfe0dd0		@ sig is 0xd00dfeed big endian
#else
		ldr	r1, =0xd00dfeed
#endif
		cmp	lr, r1
		bne	dtb_check_done		@ not found

#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
		/*
		 * OK... Let's do some funky business here.
		 * If we do have a DTB appended to zImage, and we do have
		 * an ATAG list around, we want the latter to be translated
		 * and folded into the former here. To be on the safe side,
		 * let's temporarily move the stack away into the malloc
		 * area. No GOT fixup has occurred yet, but none of the
		 * code we're about to call uses any global variable.
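		 * (For reference: the call below passes r0 = the ATAG list,
		 * r1 = the appended FDT at _edata, and r2 = the number of
		 * bytes of free space up to the temporarily raised stack.)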
		 */
		add	sp, sp, #0x10000
		stmfd	sp!, {r0-r3, ip, lr}
		mov	r0, r8
		mov	r1, r6
		sub	r2, sp, r6
		bl	atags_to_fdt

		/*
		 * If returned value is 1, there is no ATAG at the location
		 * pointed by r8.  Try the typical 0x100 offset from start
		 * of RAM and hope for the best.
		 */
		cmp	r0, #1
		sub	r0, r4, #TEXT_OFFSET
		bic	r0, r0, #1
		add	r0, r0, #0x100
		mov	r1, r6
		sub	r2, sp, r6
		bleq	atags_to_fdt

		ldmfd	sp!, {r0-r3, ip, lr}
		sub	sp, sp, #0x10000
#endif

		mov	r8, r6			@ use the appended device tree

		/*
		 * Make sure that the DTB doesn't end up in the final
		 * kernel's .bss area. To do so, we adjust the decompressed
		 * kernel size to compensate if that .bss size is larger
		 * than the relocated code.
		 */
		ldr	r5, =_kernel_bss_size
		adr	r1, wont_overwrite
		sub	r1, r6, r1
		subs	r1, r5, r1
		addhi	r9, r9, r1

		/* Get the dtb's size */
		ldr	r5, [r6, #4]
#ifndef __ARMEB__
		/* convert r5 (dtb size) to little endian */
		eor	r1, r5, r5, ror #16
		bic	r1, r1, #0x00ff0000
		mov	r5, r5, ror #8
		eor	r5, r5, r1, lsr #8
#endif

		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7

		/* relocate some pointers past the appended dtb */
		add	r6, r6, r5
		add	r10, r10, r5
		add	sp, sp, r5
dtb_check_done:
#endif

/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address (possibly with LSB set)
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= address of wont_overwrite -> OK
 * Note: the possible LSB in r4 is harmless here.
 */
		add	r10, r10, #16384
		cmp	r4, r10
		bhs	wont_overwrite
		add	r10, r4, r9
		adr	r9, wont_overwrite
		cmp	r10, r9
		bls	wont_overwrite

/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
		/*
		 * Bump to the next 256-byte boundary with the size of
		 * the relocation code added. This avoids overwriting
		 * ourselves when the offset is small.
		 */
		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
		bic	r10, r10, #255

		/* Get start of code we want to copy and align it down. */
		adr	r5, restart
		bic	r5, r5, #31

/* Relocate the hyp vector base if necessary */
#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE
		bne	1f

		bl	__hyp_get_vectors
		sub	r0, r0, r5
		add	r0, r0, r10
		bl	__hyp_set_vectors
1:
#endif

		sub	r9, r6, r5		@ size to copy
		add	r9, r9, #31		@ rounded up to a multiple
		bic	r9, r9, #31		@ ... of 32 bytes
		add	r6, r9, r5
		add	r9, r9, r10

1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
		bhi	1b

		/* Preserve offset to relocated code. */
		sub	r6, r9, r6

#ifndef CONFIG_ZBOOT_ROM
		/* cache_clean_flush may use the stack, so relocate it */
		add	sp, sp, r6
#endif

		bl	cache_clean_flush

		adr	r0, BSYM(restart)
		add	r0, r0, r6
		mov	pc, r0

wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
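 * (For instance, if the image was linked at 0 but is executing at
 * 0x80008000, the delta in r0 is 0x80008000 and every GOT entry fixed
 * up below has to be shifted by that amount; an illustrative value,
 * not a requirement.)
 *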
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = kernel execution address (possibly with LSB set)
 *   r5  = appended dtb size (0 if not present)
 *   r7  = architecture ID
 *   r8  = atags pointer
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 */
		orrs	r1, r0, r5
		beq	not_relocated

		add	r11, r11, r0
		add	r12, r12, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
		 * we need to fix up pointers into the BSS region.
		 * Note that the stack pointer has already been fixed up.
		 */
		add	r2, r2, r0
		add	r3, r3, r0

		/*
		 * Relocate all entries in the GOT table.
		 * Bump bss entries to _edata + dtb size
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ This fixes up C references
		cmp	r1, r2			@ if entry >= bss_start &&
		cmphs	r3, r1			@       bss_end > entry
		addhi	r1, r1, r5		@    entry += dtb size
		str	r1, [r11], #4		@ next entry
		cmp	r11, r12
		blo	1b

		/* bump our bss pointers too */
		add	r2, r2, r5
		add	r3, r3, r5

#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#endif

not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b

		/*
		 * Did we skip the cache setup earlier?
		 * That is indicated by the LSB in r4.
		 * Do it now if so.
		 */
		tst	r4, #1
		bic	r4, r4, #1
		blne	cache_on

/*
 * The C runtime environment should now be set up sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 */
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max
		mov	r3, r7
		bl	decompress_kernel
		bl	cache_clean_flush
		bl	cache_off
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer

#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr		@ Get saved CPU boot mode
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE		@ if not booted in HYP mode...
		bne	__enter_kernel		@ boot kernel directly

		adr	r12, .L__hyp_reentry_vectors_offset
		ldr	r0, [r12]
		add	r0, r0, r12

		bl	__hyp_set_vectors
		__HVC(0)			@ otherwise bounce to hyp mode

		b	.			@ should never be reached

		.align	2
.L__hyp_reentry_vectors_offset:	.long __hyp_reentry_vectors - .
#else
		b	__enter_kernel
#endif

		.align	2
		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_edata			@ r6
		.word	input_data_end - 4	@ r10 (inflated size location)
		.word	_got_start		@ r11
		.word	_got_end		@ ip
		.word	.L_user_stack_end	@ sp
		.word	_end - restart + 16384 + 1024*1024
		.size	LC0, . - LC0

#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =0x10000100		@ params_phys for RPC
		mov	pc, lr
		.ltorg
		.align
#endif

/*
 * Turn on the cache.  We need to set up some page tables so that we
 * can have both the I and D caches on.
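 * (The 16k figure below is the size of a full ARM short-descriptor
 * first-level table: 4096 entries of 4 bytes, each mapping a 1MB
 * section, so a flat 1:1 map of the whole 4GB space fits in it.)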
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn

/*
 * Initialize the highest priority protection region, PR7,
 * to cover the whole 32-bit address space as cacheable and bufferable.
 */
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr

__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		/*
		 * ?? ARMv3 MMU does not allow reading the control register,
		 * does this really work on ARMv3 MPU?
		 */
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		/* ?? invalidate for the second time? */
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define CB_BITS 0x08
#else
#define CB_BITS 0x0c
#endif

__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12		@ XN|U + section mapping
		orr	r1, r1, #3 << 10	@ AP=11
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		cmphs	r10, r1			@   && end of RAM > virt
		bic	r1, r1, #0x1c		@ clear XN|U + C + B
		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
		orrhs	r1, r1, r6		@ set RAM section settings
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		orr	r1, r6, #0x04		@ ensure B is set for this
		orr	r1, r1, #3 << 10
		mov	r2, pc
		mov	r2, r2, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr
ENDPROC(__setup_mmu)

@ Enable unaligned access on v6, to allow better code generation
@ for the decompressor C code:
__armv6_mmu_cache_on:
		mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
		mcr	p15, 0, r0, c1, c0, 0	@ write SCTLR
		b	__armv4_mmu_cache_on

__arm926ejs_mmu_cache_on:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
		mov	r0, #4			@ put dcache in WT mode
		mcr	p15, 7, r0, c15, c0, 0
#endif

__armv4_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mov	pc, r12

__armv7_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		tst	r11, #0xf		@ VMSA
		movne	r6, #CB_BITS | 0x02	@ !XN
		blne	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		tst	r11, #0xf		@ VMSA
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		bic	r0, r0, #1 << 28	@ clear SCTLR.TRE
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
						@ (needed for ARM1176)
#ifdef CONFIG_MMU
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #0xfffffffd		@ domain 0 = client
		bic	r6, r6, #1 << 31	@ 32-bit translation system
		bic	r6, r6, #3 << 0		@ use only ttbr0
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
		mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
#endif
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

__fa526_cache_on:
		mov	r12, lr
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mov	pc, r12

__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		b	1f
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
#endif

#define PROC_ENTRY_SIZE (4*5)

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 *  r1  = corrupted
 *  r2  = corrupted
 *  r3  = block offset
 *  r9  = corrupted
 *  r12 = corrupted
 */

call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r9, c0, c0	@ get processor ID
#else
		ldr	r9, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r9		@ (real ^ match)
		tst	r1, r2			@       & mask
 ARM(		addeq	pc, r12, r3		) @ call cache function
 THUMB(		addeq	r12, r3			)
 THUMB(		moveq	pc, r12			) @ call cache function
		add	r12, r12, #PROC_ENTRY_SIZE
		b	1b

/*
 * Table for cache operations.  This is basically:
 *	- CPU ID match
 *	- CPU ID mask
 *	- 'cache on' method instruction
 *	- 'cache off' method instruction
 *	- 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
		.align	2
		.type	proc_types,#object
proc_types:
		.word	0x41000000		@ old ARM ID
		.word	0xff00f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007000		@ ARM7/710
		.word	0xfff8fe00
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41807200		@ ARM720T (writethrough)
		.word	0xffffff00
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007400		@ ARM74x
		.word	0xff00ff00
		W(b)	__armv3_mpu_cache_on
		W(b)	__armv3_mpu_cache_off
		W(b)	__armv3_mpu_cache_flush

		.word	0x41009400		@ ARM94x
		.word	0xff00ff00
		W(b)	__armv4_mpu_cache_on
		W(b)	__armv4_mpu_cache_off
		W(b)	__armv4_mpu_cache_flush

		.word	0x41069260		@ ARM926EJ-S (v5TEJ)
		.word	0xff0ffff0
		W(b)	__arm926ejs_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x00007000		@ ARM7 IDs
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		@ Everything from here on will be the new ID system.
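		@ (Illustrative example: a Cortex-A9 r0p0 reads MIDR
		@ 0x410fc090; none of the entries above match it, but the
		@ "new CPU Id" entry further down does, because
		@ (0x410fc090 ^ 0x000f0000) & 0x000f0000 == 0.)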

		.word	0x4401a100		@ sa110 / sa1100
		.word	0xffffffe0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x6901b110		@ sa1110
		.word	0xfffffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56056900
		.word	0xffffff00		@ PXA9xx
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56158000		@ PXA168
		.word	0xfffff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x56050000		@ Feroceon
		.word	0xff0f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
		/* this conflicts with the standard ARMv5TE entry */
		.long	0x41009260		@ Old Feroceon
		.long	0xff00fff0
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv5tej_mmu_cache_flush
#endif

		.word	0x66015261		@ FA526
		.word	0xff01fff1
		W(b)	__fa526_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__fa526_cache_flush

		@ These match on the architecture ID

		.word	0x00020000		@ ARMv4T
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00050000		@ ARMv5TE
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00060000		@ ARMv5TEJ
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x0007b000		@ ARMv6
		.word	0x000ff000
		W(b)	__armv6_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x000f0000		@ new CPU Id
		.word	0x000f0000
		W(b)	__armv7_mmu_cache_on
		W(b)	__armv7_mmu_cache_off
		W(b)	__armv7_mmu_cache_flush

		.word	0			@ unrecognised type
		.word	0
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.size	proc_types, . - proc_types

		/*
		 * If you get a "non-constant expression in ".if" statement"
		 * error from the assembler on this line, check that you have
		 * not accidentally written a "b" instruction where you should
		 * have written W(b).
		 */
		.if (. - proc_types) % PROC_ENTRY_SIZE != 0
		.error "The size of one or more proc_types entries is wrong."
		.endif

/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
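 * (cache_off reaches the per-CPU handler by loading r3 with 12, the
 * byte offset of the 'cache off' slot inside a 20-byte (PROC_ENTRY_SIZE)
 * proc_types entry; call_cache_fn adds that offset to the address of
 * the matching entry.)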
 *
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn

__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
		mov	pc, lr

__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
#endif
		mov	pc, lr

__armv7_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
		bic	r0, r0, #0x000d
#else
		bic	r0, r0, #0x000c
#endif
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r12, lr
		bl	__armv7_mmu_cache_flush
		mov	r0, #0
#ifdef CONFIG_MMU
		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
#endif
		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
		mcr	p15, 0, r0, c7, c10, 4	@ DSB
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

/*
 * Clean and flush the cache to maintain consistency.
 *
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *  r4, r6, r7, r8
 */
		.align	5
cache_clean_flush:
		mov	r3, #16
		b	call_cache_fn

__armv4_mpu_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r2, #1
		mov	r3, #0
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		subs	r1, r1, #1 << 5
		bcs	1b			@ segments 7 to 0

		teq	r2, #0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mov	pc, lr

__fa526_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv6_mmu_cache_flush:
		mov	r1, #0
		tst	r4, #1
		mcreq	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcreq	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv7_mmu_cache_flush:
		tst	r4, #1
		bne	iflush
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
		mov	r10, #0
		beq	hierarchical
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		b	iflush
hierarchical:
		mcr	p15, 0, r10, c7, c10, 5	@ DMB
		stmfd	sp!, {r0-r7, r9-r11}
		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
		ands	r3, r0, #0x7000000	@ extract loc from clidr
		mov	r3, r3, lsr #23		@ left align loc bit field
		beq	finished		@ if loc is 0, then no need to clean
		mov	r10, #0			@ start clean at cache level 0
loop1:
		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
		and	r1, r1, #7		@ mask of the bits for current cache only
		cmp	r1, #2			@ see what cache we have at this level
		blt	skip			@ skip if no cache, or just i-cache
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c5, 4	@ isb to sync the new cssr&csidr
		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
		and	r2, r1, #7		@ extract the length of the cache lines
		add	r2, r2, #4		@ add 4 (line length offset)
		ldr	r4, =0x3ff
		ands	r4, r4, r1, lsr #3	@ find maximum number on the way size
		clz	r5, r4			@ find bit position of way size increment
		ldr	r7, =0x7fff
		ands	r7, r7, r1, lsr #13	@ extract max number of the index size
loop2:
		mov	r9, r4			@ create working copy of max way size
loop3:
 ARM(		orr	r11, r10, r9, lsl r5	) @ factor way and cache number into r11
 ARM(		orr	r11, r11, r7, lsl r2	) @ factor index number into r11
 THUMB(		lsl	r6, r9, r5		)
 THUMB(		orr	r11, r10, r6		) @ factor way and cache number into r11
 THUMB(		lsl	r6, r7, r2		)
 THUMB(		orr	r11, r11, r6		) @ factor index number into r11
		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
		subs	r9, r9, #1		@ decrement the way
		bge	loop3
		subs	r7, r7, #1		@ decrement the index
		bge	loop2
skip:
		add	r10, r10, #2		@ increment cache number
		cmp	r3, r10
		bgt	loop1
finished:
		ldmfd	sp!, {r0-r7, r9-r11}
		mov	r10, #0			@ switch back to cache level 0
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
iflush:
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 4	@ ISB
		mov	pc, lr

__armv5tej_mmu_cache_flush:
		tst	r4, #1
		movne	pc, lr
1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
		bne	1b
		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv4_mmu_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r9			@ cache ID register present?
		beq	no_cache_id
		mov	r1, r3, lsr #18
		and	r1, r1, #7
		mov	r2, #1024
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r3, r3, lsr #12
		and	r3, r3, #3
		mov	r11, #8
		mov	r11, r11, lsl r3	@ cache line size in bytes
no_cache_id:
		mov	r1, pc
		bic	r1, r1, #63		@ align to longest cache line
		add	r2, r1, r2
1:
 ARM(		ldr	r3, [r1], r11		) @ s/w flush D cache
 THUMB(		ldr	r3, [r1]		) @ s/w flush D cache
 THUMB(		add	r1, r1, r11		)
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r1, #0
		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
		.align	2
		.type	phexbuf,#object
phexbuf:	.space	12
		.size	phexbuf, . - phexbuf

@ phex corrupts {r0, r1, r2, r3}
phex:		adr	r3, phexbuf
		mov	r2, #0
		strb	r2, [r3, r1]
1:		subs	r1, r1, #1
		movmi	r0, r3
		bmi	puts
		and	r2, r0, #15
		mov	r0, r0, lsr #4
		cmp	r2, #10
		addge	r2, r2, #7
		add	r2, r2, #'0'
		strb	r2, [r3, r1]
		b	1b

@ puts corrupts {r0, r1, r2, r3}
puts:		loadsp	r3, r1
1:		ldrb	r2, [r0], #1
		teq	r2, #0
		moveq	pc, lr
2:		writeb	r2, r3
		mov	r1, #0x00020000
3:		subs	r1, r1, #1
		bne	3b
		teq	r2, #'\n'
		moveq	r2, #'\r'
		beq	2b
		teq	r0, #0
		bne	1b
		mov	pc, lr
@ putc corrupts {r0, r1, r2, r3}
putc:
		mov	r2, r0
		mov	r0, #0
		loadsp	r3, r1
		b	2b

@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump:	mov	r12, r0
		mov	r10, lr
		mov	r11, #0
2:		mov	r0, r11, lsl #2
		add	r0, r0, r12
		mov	r1, #8
		bl	phex
		mov	r0, #':'
		bl	putc
1:		mov	r0, #' '
		bl	putc
		ldr	r0, [r12, r11, lsl #2]
		mov	r1, #8
		bl	phex
		and	r0, r11, #7
		teq	r0, #3
		moveq	r0, #' '
		bleq	putc
		and	r0, r11, #7
		add	r11, r11, #1
		teq	r0, #7
		bne	1b
		mov	r0, #'\n'
		bl	putc
		cmp	r11, #64
		blt	2b
		mov	pc, r10
#endif

		.ltorg

#ifdef CONFIG_ARM_VIRT_EXT
.align 5
__hyp_reentry_vectors:
		W(b)	.			@ reset
		W(b)	.			@ undef
		W(b)	.			@ svc
		W(b)	.			@ pabort
		W(b)	.			@ dabort
		W(b)	__enter_kernel		@ hyp
		W(b)	.			@ irq
		W(b)	.			@ fiq
#endif /* CONFIG_ARM_VIRT_EXT */

__enter_kernel:
		mov	r0, #0			@ must be 0
 ARM(		mov	pc, r4	)		@ call kernel
 THUMB(		bx	r4	)		@ entry point is always ARM

reloc_code_end:

		.align
		.section ".stack", "aw", %nobits
.L_user_stack:	.space	4096
.L_user_stack_end: