/*
 * linux/arch/arm/boot/compressed/head.S
 *
 * Copyright (C) 1996-2002 Russell King
 * Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

	.arch	armv7-a
/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
#else
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif

#else

#include CONFIG_DEBUG_LL_INCLUDE

		.macro	writeb,	ch, rb
		senduart \ch, \rb
		.endm

#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#elif defined(CONFIG_ARCH_S3C24XX)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x50000000
		add	\rb, \rb, #0x4000 * CONFIG_S3C_LOWLEVEL_UART_PORT
		.endm
#else
		.macro	loadsp,	rb, tmp
		addruart \rb, \tmp
		.endm
#endif
#endif
#endif

		.macro	kputc,val
		mov	r0, \val
		bl	putc
		.endm

		.macro	kphex,val,len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm

		.macro	debug_reloc_start
#ifdef DEBUG
		kputc	#'\n'
		kphex	r6, 8		/* processor id */
		kputc	#':'
		kphex	r7, 8		/* architecture id */
#ifdef CONFIG_CPU_CP15
		kputc	#':'
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
#endif
		kputc	#'\n'
		kphex	r5, 8		/* decompressed kernel start */
		kputc	#'-'
		kphex	r9, 8		/* decompressed kernel end  */
		kputc	#'>'
		kphex	r4, 8		/* kernel execution address */
		kputc	#'\n'
#endif
		.endm

		.macro	debug_reloc_end
#ifdef DEBUG
		kphex	r5, 8		/* end of kernel */
		kputc	#'\n'
		mov	r0, r4
		bl	memdump		/* dump 256 bytes at start of kernel */
#endif
		.endm

		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
		.arm				@ Always enter in ARM state
start:
		.type	start,#function
		.rept	7
		mov	r0, r0
		.endr
 ARM(		mov	r0, r0		)
 ARM(		b	1f		)
 THUMB(		adr	r12, BSYM(1f)	)
 THUMB(		bx	r12		)

		.word	0x016f2818		@ Magic numbers to help the loader
		.word	start			@ absolute load/run zImage address
		.word	_edata			@ zImage end address
 THUMB(		.thumb			)
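		/*
		 * The .word values above form the zImage header: boot
		 * loaders look for the 0x016f2818 magic value and can use
		 * the start/_edata words to work out where the image
		 * expects to run and how large it is.
		 */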
1:
		mrs	r9, cpsr
#ifdef CONFIG_ARM_VIRT_EXT
		bl	__hyp_stub_install	@ get into SVC mode, reversibly
#endif
		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer

		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
not_angel:
		safe_svcmode_maskall r0
		msr	spsr_cxsf, r9		@ Save the CPU boot mode in
						@ SPSR
		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */

		.text

#ifdef CONFIG_AUTO_ZRELADDR
		@ determine final kernel image address
		mov	r4, pc
		and	r4, r4, #0xf8000000
		add	r4, r4, #TEXT_OFFSET
#else
		ldr	r4, =zreladdr
#endif

		/*
		 * Set up a page table only if it won't overwrite ourselves.
		 * That means r4 < pc && r4 - 16k page directory > &_end.
		 * Given that r4 > &_end is most infrequent, we add a rough
		 * additional 1MB of room for a possible appended DTB.
		 */
		mov	r0, pc
		cmp	r0, r4
		ldrcc	r0, LC0+32
		addcc	r0, r0, pc
		cmpcc	r4, r0
		orrcc	r4, r4, #1		@ remember we skipped cache_on
		blcs	cache_on

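/*
 * restart is (re-)entered whenever the decompressor has moved itself:
 * the LC0 pointers are reloaded and the overlap checks below are redone
 * at the current run-time address.
 */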
restart:	adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r6, r10, r11, r12}
		ldr	sp, [r0, #28]

		/*
		 * We might be running at a different address.  We need
		 * to fix up various pointers.
		 */
		sub	r0, r0, r1		@ calculate the delta offset
		add	r6, r6, r0		@ _edata
		add	r10, r10, r0		@ inflated kernel size location

		/*
		 * The kernel build system appends the size of the
		 * decompressed kernel at the end of the compressed data
		 * in little-endian form.
		 */
		ldrb	r9, [r10, #0]
		ldrb	lr, [r10, #1]
		orr	r9, r9, lr, lsl #8
		ldrb	lr, [r10, #2]
		ldrb	r10, [r10, #3]
		orr	r9, r9, lr, lsl #16
		orr	r9, r9, r10, lsl #24

#ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
		add	sp, sp, r0
		add	r10, sp, #0x10000
#else
		/*
		 * With ZBOOT_ROM the bss/stack is non relocatable,
		 * but someone could still run this code from RAM,
		 * in which case our reference is _edata.
		 */
		mov	r10, r6
#endif

		mov	r5, #0			@ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = final kernel address (possibly with LSB set)
 *   r5  = appended dtb size (still unknown)
 *   r6  = _edata
 *   r7  = architecture ID
 *   r8  = atags/device tree pointer
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 *
 * if there are device trees (dtb) appended to zImage, advance r10 so that the
 * dtb data will get relocated along with the kernel if necessary.
 */

		ldr	lr, [r6, #0]
#ifndef __ARMEB__
		ldr	r1, =0xedfe0dd0		@ sig is 0xd00dfeed big endian
#else
		ldr	r1, =0xd00dfeed
#endif
		cmp	lr, r1
		bne	dtb_check_done		@ not found

#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
		/*
		 * OK... Let's do some funky business here.
		 * If we do have a DTB appended to zImage, and we do have
		 * an ATAG list around, we want the latter to be translated
		 * and folded into the former here.  To be on the safe side,
		 * let's temporarily move the stack away into the malloc
		 * area.  No GOT fixup has occurred yet, but none of the
		 * code we're about to call uses any global variable.
		 */
		add	sp, sp, #0x10000
		stmfd	sp!, {r0-r3, ip, lr}
		mov	r0, r8
		mov	r1, r6
		sub	r2, sp, r6
		bl	atags_to_fdt

		/*
		 * If the returned value is 1, there is no ATAG at the
		 * location pointed to by r8.  Try the typical 0x100 offset
		 * from start of RAM and hope for the best.
		 */
		cmp	r0, #1
		sub	r0, r4, #TEXT_OFFSET
		bic	r0, r0, #1
		add	r0, r0, #0x100
		mov	r1, r6
		sub	r2, sp, r6
		bleq	atags_to_fdt

		ldmfd	sp!, {r0-r3, ip, lr}
		sub	sp, sp, #0x10000
#endif

		mov	r8, r6			@ use the appended device tree

		/*
		 * Make sure that the DTB doesn't end up in the final
		 * kernel's .bss area.  To do so, we adjust the decompressed
		 * kernel size to compensate if that .bss size is larger
		 * than the relocated code.
		 */
		ldr	r5, =_kernel_bss_size
		adr	r1, wont_overwrite
		sub	r1, r6, r1
		subs	r1, r5, r1
		addhi	r9, r9, r1

		/* Get the dtb's size */
		ldr	r5, [r6, #4]
#ifndef __ARMEB__
		/* convert r5 (dtb size) to little endian */
		eor	r1, r5, r5, ror #16
		bic	r1, r1, #0x00ff0000
		mov	r5, r5, ror #8
		eor	r5, r5, r1, lsr #8
#endif

		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7

		/* relocate some pointers past the appended dtb */
		add	r6, r6, r5
		add	r10, r10, r5
		add	sp, sp, r5
dtb_check_done:
#endif

/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address (possibly with LSB set)
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= address of wont_overwrite -> OK
 * Note: the possible LSB in r4 is harmless here.
 */
		add	r10, r10, #16384
		cmp	r4, r10
		bhs	wont_overwrite
		add	r10, r4, r9
		adr	r9, wont_overwrite
		cmp	r10, r9
		bls	wont_overwrite

/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
		/*
		 * Bump to the next 256-byte boundary with the size of
		 * the relocation code added.  This avoids overwriting
		 * ourselves when the offset is small.
		 */
		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
		bic	r10, r10, #255

		/* Get start of code we want to copy and align it down. */
		adr	r5, restart
		bic	r5, r5, #31

/* Relocate the hyp vector base if necessary */
#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE
		bne	1f

		bl	__hyp_get_vectors
		sub	r0, r0, r5
		add	r0, r0, r10
		bl	__hyp_set_vectors
1:
#endif

		sub	r9, r6, r5		@ size to copy
		add	r9, r9, #31		@ rounded up to a multiple
		bic	r9, r9, #31		@ ... of 32 bytes
		add	r6, r9, r5
		add	r9, r9, r10

1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
		bhi	1b

		/* Preserve offset to relocated code. */
		sub	r6, r9, r6

#ifndef CONFIG_ZBOOT_ROM
		/* cache_clean_flush may use the stack, so relocate it */
		add	sp, sp, r6
#endif

		tst	r4, #1
		bleq	cache_clean_flush

		adr	r0, BSYM(restart)
		add	r0, r0, r6
		mov	pc, r0

wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = kernel execution address (possibly with LSB set)
 *   r5  = appended dtb size (0 if not present)
 *   r7  = architecture ID
 *   r8  = atags pointer
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 */
		orrs	r1, r0, r5
		beq	not_relocated

		add	r11, r11, r0
		add	r12, r12, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC (CONFIG_ZBOOT_ROM = n),
		 * we need to fix up pointers into the BSS region.
		 * Note that the stack pointer has already been fixed up.
		 */
		add	r2, r2, r0
		add	r3, r3, r0

		/*
		 * Relocate all entries in the GOT table.
		 * Bump bss entries to _edata + dtb size
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ This fixes up C references
		cmp	r1, r2			@ if entry >= bss_start &&
		cmphs	r3, r1			@       bss_end > entry
		addhi	r1, r1, r5		@    entry += dtb size
		str	r1, [r11], #4		@ next entry
		cmp	r11, r12
		blo	1b

		/* bump our bss pointers too */
		add	r2, r2, r5
		add	r3, r3, r5

#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#endif

not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b

		/*
		 * Did we skip the cache setup earlier?
		 * That is indicated by the LSB in r4.
		 * Do it now if so.
		 */
		tst	r4, #1
		bic	r4, r4, #1
		blne	cache_on

/*
 * The C runtime environment should now be set up sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 */
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max
		mov	r3, r7
		bl	decompress_kernel
		bl	cache_clean_flush
		bl	cache_off
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer

#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr		@ Get saved CPU boot mode
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE		@ if not booted in HYP mode...
		bne	__enter_kernel		@ boot kernel directly

		adr	r12, .L__hyp_reentry_vectors_offset
		ldr	r0, [r12]
		add	r0, r0, r12

		bl	__hyp_set_vectors
		__HVC(0)			@ otherwise bounce to hyp mode

		b	.			@ should never be reached

		.align	2
.L__hyp_reentry_vectors_offset:	.long __hyp_reentry_vectors - .
#else
		b	__enter_kernel
#endif

		.align	2
		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_edata			@ r6
		.word	input_data_end - 4	@ r10 (inflated size location)
		.word	_got_start		@ r11
		.word	_got_end		@ ip
		.word	.L_user_stack_end	@ sp
		.word	_end - restart + 16384 + 1024*1024
						@ used via LC0+32 for the early
						@ page table overwrite check
		.size	LC0, . - LC0

#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =0x10000100		@ params_phys for RPC
		mov	pc, lr
		.ltorg
		.align
#endif

/*
 * Turn on the cache.  We need to set up some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn

/*
 * Initialize the highest priority protection region, PR7,
 * to cover the whole 32-bit address space, cacheable and bufferable.
 */
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr

__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		/*
		 * ?? ARMv3 MMU does not allow reading the control register,
		 * does this really work on ARMv3 MPU?
		 */
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		/* ?? invalidate for the second time? */
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

/* cacheable/bufferable attribute bits used for the RAM section mappings */
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define CB_BITS 0x08
#else
#define CB_BITS 0x0c
#endif

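/*
 * __setup_mmu builds a flat 1:1 section mapping of the whole 4GB
 * address space in the 16K page directory placed just below the kernel
 * execution address.  Only a 256MB window assumed to be RAM (starting
 * at the 256KB-aligned page directory address) is given the
 * cacheable/bufferable attributes in r6; everything else is mapped
 * uncacheable.  The 2MB we are executing from is handled separately
 * below in case we are running from flash.
 */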
__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12		@ XN|U + section mapping
		orr	r1, r1, #3 << 10	@ AP=11
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		cmphs	r10, r1			@   && end of RAM > virt
		bic	r1, r1, #0x1c		@ clear XN|U + C + B
		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
		orrhs	r1, r1, r6		@ set RAM section settings
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		orr	r1, r6, #0x04		@ ensure B is set for this
		orr	r1, r1, #3 << 10
		mov	r2, pc
		mov	r2, r2, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr
ENDPROC(__setup_mmu)

@ Enable unaligned access on v6, to allow better code generation
@ for the decompressor C code:
__armv6_mmu_cache_on:
		mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
		mcr	p15, 0, r0, c1, c0, 0	@ write SCTLR
		b	__armv4_mmu_cache_on

__arm926ejs_mmu_cache_on:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
		mov	r0, #4			@ put dcache in WT mode
		mcr	p15, 7, r0, c15, c0, 0
#endif

__armv4_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
#endif
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mov	pc, r12

__armv7_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		tst	r11, #0xf		@ VMSA
		movne	r6, #CB_BITS | 0x02	@ !XN
		blne	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		tst	r11, #0xf		@ VMSA
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		bic	r0, r0, #1 << 28	@ clear SCTLR.TRE
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
						@ (needed for ARM1176)
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
#endif
		mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #0xfffffffd		@ domain 0 = client
		bic	r6, r6, #1 << 31	@ 32-bit translation system
		bic	r6, r6, #3 << 0		@ use only ttbr0
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
		mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
#endif
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

__fa526_cache_on:
		mov	r12, lr
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mov	pc, r12

__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		b	1f
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
#endif

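/*
 * Each proc_types entry below is exactly PROC_ENTRY_SIZE bytes:
 * the CPU ID match word, the mask word, and one 32-bit instruction
 * for each of the 'cache on', 'cache off' and 'cache flush' methods.
 */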
#define PROC_ENTRY_SIZE (4*5)

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 *  r1  = corrupted
 *  r2  = corrupted
 *  r3  = block offset
 *  r9  = corrupted
 *  r12 = corrupted
 */

call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r9, c0, c0	@ get processor ID
#else
		ldr	r9, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r9		@ (real ^ match)
		tst	r1, r2			@       & mask
 ARM(		addeq	pc, r12, r3		) @ call cache function
 THUMB(		addeq	r12, r3			)
 THUMB(		moveq	pc, r12			) @ call cache function
		add	r12, r12, #PROC_ENTRY_SIZE
		b	1b

/*
 * Table for cache operations.  This is basically:
 *	- CPU ID match
 *	- CPU ID mask
 *	- 'cache on' method instruction
 *	- 'cache off' method instruction
 *	- 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
		.align	2
		.type	proc_types,#object
proc_types:
		.word	0x41000000		@ old ARM ID
		.word	0xff00f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007000		@ ARM7/710
		.word	0xfff8fe00
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41807200		@ ARM720T (writethrough)
		.word	0xffffff00
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007400		@ ARM74x
		.word	0xff00ff00
		W(b)	__armv3_mpu_cache_on
		W(b)	__armv3_mpu_cache_off
		W(b)	__armv3_mpu_cache_flush

		.word	0x41009400		@ ARM94x
		.word	0xff00ff00
		W(b)	__armv4_mpu_cache_on
		W(b)	__armv4_mpu_cache_off
		W(b)	__armv4_mpu_cache_flush

		.word	0x41069260		@ ARM926EJ-S (v5TEJ)
		.word	0xff0ffff0
		W(b)	__arm926ejs_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x00007000		@ ARM7 IDs
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		@ Everything from here on will be the new ID system.

		.word	0x4401a100		@ sa110 / sa1100
		.word	0xffffffe0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x6901b110		@ sa1110
		.word	0xfffffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56056900
		.word	0xffffff00		@ PXA9xx
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56158000		@ PXA168
		.word	0xfffff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x56050000		@ Feroceon
		.word	0xff0f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
		/* this conflicts with the standard ARMv5TE entry */
		.long	0x41009260		@ Old Feroceon
		.long	0xff00fff0
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv5tej_mmu_cache_flush
#endif

		.word	0x66015261		@ FA526
		.word	0xff01fff1
		W(b)	__fa526_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__fa526_cache_flush

		@ These match on the architecture ID

		.word	0x00020000		@ ARMv4T
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00050000		@ ARMv5TE
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00060000		@ ARMv5TEJ
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x0007b000		@ ARMv6
		.word	0x000ff000
		W(b)	__armv6_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x000f0000		@ new CPU Id
		.word	0x000f0000
		W(b)	__armv7_mmu_cache_on
		W(b)	__armv7_mmu_cache_off
		W(b)	__armv7_mmu_cache_flush

		.word	0			@ unrecognised type
		.word	0
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.size	proc_types, . - proc_types

		/*
		 * If you get a "non-constant expression in ".if" statement"
		 * error from the assembler on this line, check that you have
		 * not accidentally written a "b" instruction where you should
		 * have written W(b).
		 */
		.if (. - proc_types) % PROC_ENTRY_SIZE != 0
		.error "The size of one or more proc_types entries is wrong."
		.endif

/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn

__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
		mov	pc, lr

__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
#endif
		mov	pc, lr

__armv7_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
		bic	r0, r0, #0x000d
#else
		bic	r0, r0, #0x000c
#endif
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r12, lr
		bl	__armv7_mmu_cache_flush
		mov	r0, #0
#ifdef CONFIG_MMU
		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
#endif
		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
		mcr	p15, 0, r0, c7, c10, 4	@ DSB
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

/*
 * Clean and flush the cache to maintain consistency.
 *
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *  r4, r6, r7, r8
 */
		.align	5
cache_clean_flush:
		mov	r3, #16
		b	call_cache_fn

__armv4_mpu_cache_flush:
		mov	r2, #1
		mov	r3, #0
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		subs	r1, r1, #1 << 5
		bcs	1b			@ segments 7 to 0

		teq	r2, #0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mov	pc, lr

__fa526_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv6_mmu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

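/*
 * ARMv7 cleans and invalidates the D-cache by set/way: CLIDR gives the
 * level of coherency and the cache type at each level, and for every
 * data/unified level the set and way counts are read from CCSIDR so
 * the clean+invalidate operation can walk every cache line.
 */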
__armv7_mmu_cache_flush:
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
		mov	r10, #0
		beq	hierarchical
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		b	iflush
hierarchical:
		mcr	p15, 0, r10, c7, c10, 5	@ DMB
		stmfd	sp!, {r0-r7, r9-r11}
		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
		ands	r3, r0, #0x7000000	@ extract loc from clidr
		mov	r3, r3, lsr #23		@ left align loc bit field
		beq	finished		@ if loc is 0, then no need to clean
		mov	r10, #0			@ start clean at cache level 0
loop1:
		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
		and	r1, r1, #7		@ mask off the bits for current cache only
		cmp	r1, #2			@ see what cache we have at this level
		blt	skip			@ skip if no cache, or just i-cache
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c5, 4	@ isb to sync the new cssr&csidr
		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
		and	r2, r1, #7		@ extract the length of the cache lines
		add	r2, r2, #4		@ add 4 (line length offset)
		ldr	r4, =0x3ff
		ands	r4, r4, r1, lsr #3	@ find maximum way number (ways - 1)
		clz	r5, r4			@ find bit position of way size increment
		ldr	r7, =0x7fff
		ands	r7, r7, r1, lsr #13	@ extract maximum set (index) number
loop2:
		mov	r9, r4			@ create working copy of max way size
loop3:
 ARM(		orr	r11, r10, r9, lsl r5	) @ factor way and cache number into r11
 ARM(		orr	r11, r11, r7, lsl r2	) @ factor index number into r11
 THUMB(		lsl	r6, r9, r5		)
 THUMB(		orr	r11, r10, r6		) @ factor way and cache number into r11
 THUMB(		lsl	r6, r7, r2		)
 THUMB(		orr	r11, r11, r6		) @ factor index number into r11
		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
		subs	r9, r9, #1		@ decrement the way
		bge	loop3
		subs	r7, r7, #1		@ decrement the index
		bge	loop2
skip:
		add	r10, r10, #2		@ increment cache number
		cmp	r3, r10
		bgt	loop1
finished:
		ldmfd	sp!, {r0-r7, r9-r11}
		mov	r10, #0			@ switch back to cache level 0
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
iflush:
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 4	@ ISB
		mov	pc, lr

__armv5tej_mmu_cache_flush:
1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
						@ (result goes to the flags; loop
						@ while dirty lines remain)
		bne	1b
		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv4_mmu_cache_flush:
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r9			@ cache ID register present?
		beq	no_cache_id
		mov	r1, r3, lsr #18
		and	r1, r1, #7
		mov	r2, #1024
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r3, r3, lsr #12
		and	r3, r3, #3
		mov	r11, #8
		mov	r11, r11, lsl r3	@ cache line size in bytes
no_cache_id:
		mov	r1, pc
		bic	r1, r1, #63		@ align to longest cache line
		add	r2, r1, r2
1:
 ARM(		ldr	r3, [r1], r11		) @ s/w flush D cache
 THUMB(		ldr	r3, [r1]		) @ s/w flush D cache
 THUMB(		add	r1, r1, r11		)
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
		.align	2
		.type	phexbuf,#object
phexbuf:	.space	12
		.size	phexbuf, . - phexbuf

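/*
 * phex prints r0 as an r1-digit hex number: the digits are stored into
 * phexbuf starting with the least significant nibble at the end of the
 * buffer, the string is NUL terminated, and puts is then called on the
 * buffer.
 */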
@ phex corrupts {r0, r1, r2, r3}
phex:		adr	r3, phexbuf
		mov	r2, #0
		strb	r2, [r3, r1]
1:		subs	r1, r1, #1
		movmi	r0, r3
		bmi	puts
		and	r2, r0, #15
		mov	r0, r0, lsr #4
		cmp	r2, #10
		addge	r2, r2, #7
		add	r2, r2, #'0'
		strb	r2, [r3, r1]
		b	1b

@ puts corrupts {r0, r1, r2, r3}
puts:		loadsp	r3, r1
1:		ldrb	r2, [r0], #1
		teq	r2, #0
		moveq	pc, lr
2:		writeb	r2, r3
		mov	r1, #0x00020000
3:		subs	r1, r1, #1
		bne	3b
		teq	r2, #'\n'
		moveq	r2, #'\r'
		beq	2b
		teq	r0, #0
		bne	1b
		mov	pc, lr
@ putc corrupts {r0, r1, r2, r3}
putc:
		mov	r2, r0
		mov	r0, #0
		loadsp	r3, r1
		b	2b

@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump:	mov	r12, r0
		mov	r10, lr
		mov	r11, #0
2:		mov	r0, r11, lsl #2
		add	r0, r0, r12
		mov	r1, #8
		bl	phex
		mov	r0, #':'
		bl	putc
1:		mov	r0, #' '
		bl	putc
		ldr	r0, [r12, r11, lsl #2]
		mov	r1, #8
		bl	phex
		and	r0, r11, #7
		teq	r0, #3
		moveq	r0, #' '
		bleq	putc
		and	r0, r11, #7
		add	r11, r11, #1
		teq	r0, #7
		bne	1b
		mov	r0, #'\n'
		bl	putc
		cmp	r11, #64
		blt	2b
		mov	pc, r10
#endif

		.ltorg

#ifdef CONFIG_ARM_VIRT_EXT
.align 5
__hyp_reentry_vectors:
		W(b)	.			@ reset
		W(b)	.			@ undef
		W(b)	.			@ svc
		W(b)	.			@ pabort
		W(b)	.			@ dabort
		W(b)	__enter_kernel		@ hyp
		W(b)	.			@ irq
		W(b)	.			@ fiq
#endif /* CONFIG_ARM_VIRT_EXT */

__enter_kernel:
		mov	r0, #0			@ must be 0
 ARM(		mov	pc, r4	)		@ call kernel
 THUMB(		bx	r4	)		@ entry point is always ARM

reloc_code_end:

		.align
		.section ".stack", "aw", %nobits
.L_user_stack:	.space	4096
.L_user_stack_end: