/*
 * linux/arch/arm/boot/compressed/head.S
 *
 * Copyright (C) 1996-2002 Russell King
 * Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

		.arch	armv7-a
/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
#else
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif

#else

#include CONFIG_DEBUG_LL_INCLUDE

		.macro	writeb,	ch, rb
		senduart \ch, \rb
		.endm

#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#else
		.macro	loadsp,	rb, tmp
		addruart \rb, \tmp
		.endm
#endif
#endif
#endif

		.macro	kputc,val
		mov	r0, \val
		bl	putc
		.endm

		.macro	kphex,val,len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm

		.macro	debug_reloc_start
#ifdef DEBUG
		kputc	#'\n'
		kphex	r6, 8		/* processor id */
		kputc	#':'
		kphex	r7, 8		/* architecture id */
#ifdef CONFIG_CPU_CP15
		kputc	#':'
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
#endif
		kputc	#'\n'
		kphex	r5, 8		/* decompressed kernel start */
		kputc	#'-'
		kphex	r9, 8		/* decompressed kernel end */
		kputc	#'>'
		kphex	r4, 8		/* kernel execution address */
		kputc	#'\n'
#endif
		.endm

		.macro	debug_reloc_end
#ifdef DEBUG
		kphex	r5, 8		/* end of kernel */
		kputc	#'\n'
		mov	r0, r4
		bl	memdump		/* dump 256 bytes at start of kernel */
#endif
		.endm

		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
		.arm				@ Always enter in ARM state
start:
		.type	start,#function
		.rept	7
		mov	r0, r0
		.endr
   ARM(		mov	r0, r0		)
   ARM(		b	1f		)
 THUMB(		adr	r12, BSYM(1f)	)
 THUMB(		bx	r12		)

		.word	0x016f2818		@ Magic numbers to help the loader
		.word	start			@ absolute load/run zImage address
		.word	_edata			@ zImage end address
 THUMB(		.thumb			)
1:
 ARM_BE8(	setend	be )			@ go BE8 if compiled for BE8
		mrs	r9, cpsr
#ifdef CONFIG_ARM_VIRT_EXT
		bl	__hyp_stub_install	@ get into SVC mode, reversibly
#endif
		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer

		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
not_angel:
		safe_svcmode_maskall r0
		msr	spsr_cxsf, r9		@ Save the CPU boot mode in
						@ SPSR
		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */

		.text

#ifdef CONFIG_AUTO_ZRELADDR
		@ determine final kernel image address
		mov	r4, pc
		and	r4, r4, #0xf8000000
		add	r4, r4, #TEXT_OFFSET
#else
		ldr	r4, =zreladdr
#endif

		/*
		 * Set up a page table only if it won't overwrite ourself.
		 * That means r4 < pc && r4 - 16k page directory > &_end.
		 * Given that r4 > &_end is most unfrequent, we add a rough
		 * additional 1MB of room for a possible appended DTB.
		 */
		mov	r0, pc
		cmp	r0, r4
		ldrcc	r0, LC0+32
		addcc	r0, r0, pc
		cmpcc	r4, r0
		orrcc	r4, r4, #1		@ remember we skipped cache_on
		blcs	cache_on

restart:	adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r6, r10, r11, r12}
		ldr	sp, [r0, #28]

		/*
		 * We might be running at a different address.  We need
		 * to fix up various pointers.
		 */
		sub	r0, r0, r1		@ calculate the delta offset
		add	r6, r6, r0		@ _edata
		add	r10, r10, r0		@ inflated kernel size location

		/*
		 * The kernel build system appends the size of the
		 * decompressed kernel at the end of the compressed data
		 * in little-endian form.
		 */
		ldrb	r9, [r10, #0]
		ldrb	lr, [r10, #1]
		orr	r9, r9, lr, lsl #8
		ldrb	lr, [r10, #2]
		ldrb	r10, [r10, #3]
		orr	r9, r9, lr, lsl #16
		orr	r9, r9, r10, lsl #24

#ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
		add	sp, sp, r0
		add	r10, sp, #0x10000
#else
		/*
		 * With ZBOOT_ROM the bss/stack is non relocatable,
		 * but someone could still run this code from RAM,
		 * in which case our reference is _edata.
		 */
		mov	r10, r6
#endif

		mov	r5, #0			@ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
 * r0  = delta
 * r2  = BSS start
 * r3  = BSS end
 * r4  = final kernel address (possibly with LSB set)
 * r5  = appended dtb size (still unknown)
 * r6  = _edata
 * r7  = architecture ID
 * r8  = atags/device tree pointer
 * r9  = size of decompressed image
 * r10 = end of this image, including bss/stack/malloc space if non XIP
 * r11 = GOT start
 * r12 = GOT end
 * sp  = stack pointer
 *
 * if there are device trees (dtb) appended to zImage, advance r10 so that the
 * dtb data will get relocated along with the kernel if necessary.
 */

		ldr	lr, [r6, #0]
#ifndef __ARMEB__
		ldr	r1, =0xedfe0dd0		@ sig is 0xd00dfeed big endian
#else
		ldr	r1, =0xd00dfeed
#endif
		cmp	lr, r1
		bne	dtb_check_done		@ not found

#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
		/*
		 * OK... Let's do some funky business here.
		 * If we do have a DTB appended to zImage, and we do have
		 * an ATAG list around, we want the latter to be translated
		 * and folded into the former here.  To be on the safe side,
		 * let's temporarily move the stack away into the malloc
		 * area.  No GOT fixup has occurred yet, but none of the
		 * code we're about to call uses any global variable.
		 */
		add	sp, sp, #0x10000
		stmfd	sp!, {r0-r3, ip, lr}
		mov	r0, r8
		mov	r1, r6
		sub	r2, sp, r6
		bl	atags_to_fdt

		/*
		 * If returned value is 1, there is no ATAG at the location
		 * pointed by r8.  Try the typical 0x100 offset from start
		 * of RAM and hope for the best.
		 */
		cmp	r0, #1
		sub	r0, r4, #TEXT_OFFSET
		bic	r0, r0, #1
		add	r0, r0, #0x100
		mov	r1, r6
		sub	r2, sp, r6
		bleq	atags_to_fdt

		ldmfd	sp!, {r0-r3, ip, lr}
		sub	sp, sp, #0x10000
#endif

		mov	r8, r6			@ use the appended device tree

		/*
		 * Make sure that the DTB doesn't end up in the final
		 * kernel's .bss area.  To do so, we adjust the decompressed
		 * kernel size to compensate if that .bss size is larger
		 * than the relocated code.
		 */
		ldr	r5, =_kernel_bss_size
		adr	r1, wont_overwrite
		sub	r1, r6, r1
		subs	r1, r5, r1
		addhi	r9, r9, r1

		/* Get the dtb's size */
		ldr	r5, [r6, #4]
#ifndef __ARMEB__
		/* convert r5 (dtb size) to little endian */
		eor	r1, r5, r5, ror #16
		bic	r1, r1, #0x00ff0000
		mov	r5, r5, ror #8
		eor	r5, r5, r1, lsr #8
#endif

		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7

		/* relocate some pointers past the appended dtb */
		add	r6, r6, r5
		add	r10, r10, r5
		add	sp, sp, r5
dtb_check_done:
#endif

/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address (possibly with LSB set)
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= address of wont_overwrite -> OK
 * Note: the possible LSB in r4 is harmless here.
 */
		add	r10, r10, #16384
		cmp	r4, r10
		bhs	wont_overwrite
		add	r10, r4, r9
		adr	r9, wont_overwrite
		cmp	r10, r9
		bls	wont_overwrite

/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
		/*
		 * Bump to the next 256-byte boundary with the size of
		 * the relocation code added.  This avoids overwriting
		 * ourself when the offset is small.
		 */
		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
		bic	r10, r10, #255

		/* Get start of code we want to copy and align it down. */
		adr	r5, restart
		bic	r5, r5, #31

/* Relocate the hyp vector base if necessary */
#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE
		bne	1f

		bl	__hyp_get_vectors
		sub	r0, r0, r5
		add	r0, r0, r10
		bl	__hyp_set_vectors
1:
#endif

		sub	r9, r6, r5		@ size to copy
		add	r9, r9, #31		@ rounded up to a multiple
		bic	r9, r9, #31		@ ... of 32 bytes
		add	r6, r9, r5
		add	r9, r9, r10

1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
		bhi	1b

		/* Preserve offset to relocated code. */
		sub	r6, r9, r6

#ifndef CONFIG_ZBOOT_ROM
		/* cache_clean_flush may use the stack, so relocate it */
		add	sp, sp, r6
#endif

		tst	r4, #1
		bleq	cache_clean_flush

		adr	r0, BSYM(restart)
		add	r0, r0, r6
		mov	pc, r0

wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = kernel execution address (possibly with LSB set)
 *   r5  = appended dtb size (0 if not present)
 *   r7  = architecture ID
 *   r8  = atags pointer
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 */
		orrs	r1, r0, r5
		beq	not_relocated

		add	r11, r11, r0
		add	r12, r12, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
		 * we need to fix up pointers into the BSS region.
		 * Note that the stack pointer has already been fixed up.
		 */
		add	r2, r2, r0
		add	r3, r3, r0

		/*
		 * Relocate all entries in the GOT table.
		 * Bump bss entries to _edata + dtb size
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ This fixes up C references
		cmp	r1, r2			@ if entry >= bss_start &&
		cmphs	r3, r1			@       bss_end > entry
		addhi	r1, r1, r5		@    entry += dtb size
		str	r1, [r11], #4		@ next entry
		cmp	r11, r12
		blo	1b

		/* bump our bss pointers too */
		add	r2, r2, r5
		add	r3, r3, r5

#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#endif

not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b

		/*
		 * Did we skip the cache setup earlier?
		 * That is indicated by the LSB in r4.
		 * Do it now if so.
		 */
		tst	r4, #1
		bic	r4, r4, #1
		blne	cache_on

/*
 * The C runtime environment should now be set up sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 */
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max
		mov	r3, r7
		bl	decompress_kernel
		bl	cache_clean_flush
		bl	cache_off
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer

#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr		@ Get saved CPU boot mode
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE		@ if not booted in HYP mode...
		bne	__enter_kernel		@ boot kernel directly

		adr	r12, .L__hyp_reentry_vectors_offset
		ldr	r0, [r12]
		add	r0, r0, r12

		bl	__hyp_set_vectors
		__HVC(0)			@ otherwise bounce to hyp mode

		b	.			@ should never be reached

		.align	2
.L__hyp_reentry_vectors_offset:	.long __hyp_reentry_vectors - .
#else
		b	__enter_kernel
#endif

		.align	2
		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_edata			@ r6
		.word	input_data_end - 4	@ r10 (inflated size location)
		.word	_got_start		@ r11
		.word	_got_end		@ ip
		.word	.L_user_stack_end	@ sp
		.word	_end - restart + 16384 + 1024*1024
		.size	LC0, . - LC0

#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =0x10000100		@ params_phys for RPC
		mov	pc, lr
		.ltorg
		.align
#endif

/*
 * Turn on the cache.  We need to setup some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn

/*
 * Initialize the highest priority protection region, PR7
 * to cover all 32bit address and cacheable and bufferable.
 */
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr

__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		/*
		 * ?? ARMv3 MMU does not allow reading the control register,
		 * does this really work on ARMv3 MPU?
		 */
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		/* ?? invalidate for the second time? */
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define CB_BITS 0x08
#else
#define CB_BITS 0x0c
#endif

__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12		@ XN|U + section mapping
		orr	r1, r1, #3 << 10	@ AP=11
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		cmphs	r10, r1			@   && end of RAM > virt
		bic	r1, r1, #0x1c		@ clear XN|U + C + B
		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
		orrhs	r1, r1, r6		@ set RAM section settings
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		orr	r1, r6, #0x04		@ ensure B is set for this
		orr	r1, r1, #3 << 10
		mov	r2, pc
		mov	r2, r2, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr
ENDPROC(__setup_mmu)

@ Enable unaligned access on v6, to allow better code generation
@ for the decompressor C code:
__armv6_mmu_cache_on:
		mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
		mcr	p15, 0, r0, c1, c0, 0	@ write SCTLR
		b	__armv4_mmu_cache_on

__arm926ejs_mmu_cache_on:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
		mov	r0, #4			@ put dcache in WT mode
		mcr	p15, 7, r0, c15, c0, 0
#endif

__armv4_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mov	pc, r12

__armv7_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		tst	r11, #0xf		@ VMSA
		movne	r6, #CB_BITS | 0x02	@ !XN
		blne	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		tst	r11, #0xf		@ VMSA
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		bic	r0, r0, #1 << 28	@ clear SCTLR.TRE
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
						@ (needed for ARM1176)
#ifdef CONFIG_MMU
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #0xfffffffd		@ domain 0 = client
		bic	r6, r6, #1 << 31	@ 32-bit translation system
		bic	r6, r6, #3 << 0		@ use only ttbr0
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
		mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
#endif
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

__fa526_cache_on:
		mov	r12, lr
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mov	pc, r12

__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		b	1f
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
#endif

#define PROC_ENTRY_SIZE (4*5)

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 *  r1  = corrupted
 *  r2  = corrupted
 *  r3  = block offset
 *  r9  = corrupted
 *  r12 = corrupted
 */

call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r9, c0, c0	@ get processor ID
#else
		ldr	r9, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r9		@ (real ^ match)
		tst	r1, r2			@       & mask
 ARM(		addeq	pc, r12, r3		) @ call cache function
 THUMB(		addeq	r12, r3			)
 THUMB(		moveq	pc, r12			) @ call cache function
		add	r12, r12, #PROC_ENTRY_SIZE
		b	1b

/*
 * Table for cache operations.  This is basically:
 *  - CPU ID match
 *  - CPU ID mask
 *  - 'cache on' method instruction
 *  - 'cache off' method instruction
 *  - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
		.align	2
		.type	proc_types,#object
proc_types:
		.word	0x41000000		@ old ARM ID
		.word	0xff00f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007000		@ ARM7/710
		.word	0xfff8fe00
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41807200		@ ARM720T (writethrough)
		.word	0xffffff00
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007400		@ ARM74x
		.word	0xff00ff00
		W(b)	__armv3_mpu_cache_on
		W(b)	__armv3_mpu_cache_off
		W(b)	__armv3_mpu_cache_flush

		.word	0x41009400		@ ARM94x
		.word	0xff00ff00
		W(b)	__armv4_mpu_cache_on
		W(b)	__armv4_mpu_cache_off
		W(b)	__armv4_mpu_cache_flush

		.word	0x41069260		@ ARM926EJ-S (v5TEJ)
		.word	0xff0ffff0
		W(b)	__arm926ejs_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x00007000		@ ARM7 IDs
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		@ Everything from here on will be the new ID system.

		.word	0x4401a100		@ sa110 / sa1100
		.word	0xffffffe0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x6901b110		@ sa1110
		.word	0xfffffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56056900
		.word	0xffffff00		@ PXA9xx
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56158000		@ PXA168
		.word	0xfffff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x56050000		@ Feroceon
		.word	0xff0f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
		/* this conflicts with the standard ARMv5TE entry */
		.long	0x41009260		@ Old Feroceon
		.long	0xff00fff0
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv5tej_mmu_cache_flush
#endif

		.word	0x66015261		@ FA526
		.word	0xff01fff1
		W(b)	__fa526_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__fa526_cache_flush

		@ These match on the architecture ID

		.word	0x00020000		@ ARMv4T
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00050000		@ ARMv5TE
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00060000		@ ARMv5TEJ
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x0007b000		@ ARMv6
		.word	0x000ff000
		W(b)	__armv6_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x000f0000		@ new CPU Id
		.word	0x000f0000
		W(b)	__armv7_mmu_cache_on
		W(b)	__armv7_mmu_cache_off
		W(b)	__armv7_mmu_cache_flush

		.word	0			@ unrecognised type
		.word	0
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.size	proc_types, . - proc_types

		/*
		 * If you get a "non-constant expression in ".if" statement"
		 * error from the assembler on this line, check that you have
		 * not accidentally written a "b" instruction where you should
		 * have written W(b).
		 */
		.if (. - proc_types) % PROC_ENTRY_SIZE != 0
		.error "The size of one or more proc_types entries is wrong."
		.endif

/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn

__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
		mov	pc, lr

__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
#endif
		mov	pc, lr

__armv7_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
		bic	r0, r0, #0x000d
#else
		bic	r0, r0, #0x000c
#endif
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r12, lr
		bl	__armv7_mmu_cache_flush
		mov	r0, #0
#ifdef CONFIG_MMU
		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
#endif
		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
		mcr	p15, 0, r0, c7, c10, 4	@ DSB
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

/*
 * Clean and flush the cache to maintain consistency.
 *
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *  r4, r6, r7, r8
 */
		.align	5
cache_clean_flush:
		mov	r3, #16
		b	call_cache_fn

__armv4_mpu_cache_flush:
		mov	r2, #1
		mov	r3, #0
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		subs	r1, r1, #1 << 5
		bcs	1b			@ segments 7 to 0

		teq	r2, #0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mov	pc, lr

__fa526_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv6_mmu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv7_mmu_cache_flush:
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
		mov	r10, #0
		beq	hierarchical
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		b	iflush
hierarchical:
		mcr	p15, 0, r10, c7, c10, 5	@ DMB
		stmfd	sp!, {r0-r7, r9-r11}
		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
		ands	r3, r0, #0x7000000	@ extract loc from clidr
		mov	r3, r3, lsr #23		@ left align loc bit field
		beq	finished		@ if loc is 0, then no need to clean
		mov	r10, #0			@ start clean at cache level 0
loop1:
		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
		and	r1, r1, #7		@ mask off the bits for current cache only
		cmp	r1, #2			@ see what cache we have at this level
		blt	skip			@ skip if no cache, or just i-cache
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c5, 4	@ isb to sync the new cssr&csidr
		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
		and	r2, r1, #7		@ extract the length of the cache lines
		add	r2, r2, #4		@ add 4 (line length offset)
		ldr	r4, =0x3ff
		ands	r4, r4, r1, lsr #3	@ find maximum number on the way size
		clz	r5, r4			@ find bit position of way size increment
		ldr	r7, =0x7fff
		ands	r7, r7, r1, lsr #13	@ extract max number of the index size
loop2:
		mov	r9, r4			@ create working copy of max way size
loop3:
 ARM(		orr	r11, r10, r9, lsl r5	) @ factor way and cache number into r11
 ARM(		orr	r11, r11, r7, lsl r2	) @ factor index number into r11
 THUMB(		lsl	r6, r9, r5		)
 THUMB(		orr	r11, r10, r6		) @ factor way and cache number into r11
 THUMB(		lsl	r6, r7, r2		)
 THUMB(		orr	r11, r11, r6		) @ factor index number into r11
		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
		subs	r9, r9, #1		@ decrement the way
		bge	loop3
		subs	r7, r7, #1		@ decrement the index
		bge	loop2
skip:
		add	r10, r10, #2		@ increment cache number
		cmp	r3, r10
		bgt	loop1
finished:
		ldmfd	sp!, {r0-r7, r9-r11}
		mov	r10, #0			@ switch back to cache level 0
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
iflush:
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 4	@ ISB
		mov	pc, lr

__armv5tej_mmu_cache_flush:
1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
		bne	1b
		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv4_mmu_cache_flush:
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r9			@ cache ID register present?
		beq	no_cache_id
		mov	r1, r3, lsr #18
		and	r1, r1, #7
		mov	r2, #1024
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r3, r3, lsr #12
		and	r3, r3, #3
		mov	r11, #8
		mov	r11, r11, lsl r3	@ cache line size in bytes
no_cache_id:
		mov	r1, pc
		bic	r1, r1, #63		@ align to longest cache line
		add	r2, r1, r2
1:
 ARM(		ldr	r3, [r1], r11		) @ s/w flush D cache
 THUMB(		ldr	r3, [r1]		) @ s/w flush D cache
 THUMB(		add	r1, r1, r11		)
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
		.align	2
		.type	phexbuf,#object
phexbuf:	.space	12
		.size	phexbuf, . - phexbuf

@ phex corrupts {r0, r1, r2, r3}
phex:		adr	r3, phexbuf
		mov	r2, #0
		strb	r2, [r3, r1]
1:		subs	r1, r1, #1
		movmi	r0, r3
		bmi	puts
		and	r2, r0, #15
		mov	r0, r0, lsr #4
		cmp	r2, #10
		addge	r2, r2, #7
		add	r2, r2, #'0'
		strb	r2, [r3, r1]
		b	1b

@ puts corrupts {r0, r1, r2, r3}
puts:		loadsp	r3, r1
1:		ldrb	r2, [r0], #1
		teq	r2, #0
		moveq	pc, lr
2:		writeb	r2, r3
		mov	r1, #0x00020000
3:		subs	r1, r1, #1
		bne	3b
		teq	r2, #'\n'
		moveq	r2, #'\r'
		beq	2b
		teq	r0, #0
		bne	1b
		mov	pc, lr
@ putc corrupts {r0, r1, r2, r3}
putc:
		mov	r2, r0
		mov	r0, #0
		loadsp	r3, r1
		b	2b

@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump:	mov	r12, r0
		mov	r10, lr
		mov	r11, #0
2:		mov	r0, r11, lsl #2
		add	r0, r0, r12
		mov	r1, #8
		bl	phex
		mov	r0, #':'
		bl	putc
1:		mov	r0, #' '
		bl	putc
		ldr	r0, [r12, r11, lsl #2]
		mov	r1, #8
		bl	phex
		and	r0, r11, #7
		teq	r0, #3
		moveq	r0, #' '
		bleq	putc
		and	r0, r11, #7
		add	r11, r11, #1
		teq	r0, #7
		bne	1b
		mov	r0, #'\n'
		bl	putc
		cmp	r11, #64
		blt	2b
		mov	pc, r10
#endif

		.ltorg

#ifdef CONFIG_ARM_VIRT_EXT
.align 5
__hyp_reentry_vectors:
		W(b)	.			@ reset
		W(b)	.			@ undef
		W(b)	.			@ svc
		W(b)	.			@ pabort
		W(b)	.			@ dabort
		W(b)	__enter_kernel		@ hyp
		W(b)	.			@ irq
		W(b)	.			@ fiq
#endif /* CONFIG_ARM_VIRT_EXT */

__enter_kernel:
		mov	r0, #0			@ must be 0
 ARM(		mov	pc, r4		)	@ call kernel
 THUMB(		bx	r4		)	@ entry point is always ARM

reloc_code_end:

		.align
		.section ".stack", "aw", %nobits
.L_user_stack:	.space	4096
.L_user_stack_end: