head.S: c3cee924bd855184d15bc4aa6088dcf8e2c1394c → d7bea550279db28cd154fd54843ebc858ffdf0b7
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors: Catalin Marinas <catalin.marinas@arm.com>

--- 743 unchanged lines hidden (view full) ---

SYM_FUNC_END(__no_granule_support)

#ifdef CONFIG_RELOCATABLE
SYM_FUNC_START_LOCAL(__relocate_kernel)
	/*
	 * Iterate over each entry in the relocation table, and apply the
	 * relocations in place.
	 */
-	ldr	w9, =__rela_offset		// offset to reloc table
-	ldr	w10, =__rela_size		// size of reloc table
-
+	adr_l	x9, __rela_start
+	adr_l	x10, __rela_end
	mov_q	x11, KIMAGE_VADDR		// default virtual offset
	add	x11, x11, x23			// actual virtual offset
-	add	x9, x9, x11			// __va(.rela)
-	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)

0:	cmp	x9, x10
	b.hs	1f
	ldp	x12, x13, [x9], #24
	ldr	x14, [x9, #-8]
	cmp	w13, #R_AARCH64_RELATIVE
	b.ne	0b
	add	x14, x14, x23			// relocate
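
The hunk above replaces the literal-pool loads of __rela_offset/__rela_size with PC-relative adr_l references to __rela_start/__rela_end, so x9/x10 hold the table bounds directly and the __va() arithmetic is dropped. The loop itself is easier to follow in C: each R_AARCH64_RELATIVE entry asks for its addend, plus the runtime displacement, to be written at the location the entry names. The sketch below only illustrates that semantics and is not the kernel's code; apply_rela, rela/rela_end and disp are hypothetical stand-ins for __rela_start/__rela_end and the displacement carried in x23, and the store itself sits in the lines hidden just below.

#include <stdint.h>

/* One .rela entry, as the ldp/ldr above consume it:
 * x12 <- r_offset, w13 <- r_info, x14 <- r_addend. */
struct rela_entry {
	uint64_t r_offset;	/* default (link-time) virtual address to patch */
	uint64_t r_info;	/* relocation type */
	int64_t  r_addend;	/* value the linker stored for this location */
};

#define R_AARCH64_RELATIVE	1027

/* "disp" plays the role of x23: the offset the image actually runs at,
 * relative to its default KIMAGE_VADDR-based address. */
static void apply_rela(const struct rela_entry *rela,
		       const struct rela_entry *rela_end, uint64_t disp)
{
	for (; rela < rela_end; rela++) {
		if ((uint32_t)rela->r_info != R_AARCH64_RELATIVE)
			continue;	/* b.ne 0b: other types are skipped */
		/* add x14, x14, x23, then store at the displaced target */
		*(uint64_t *)(rela->r_offset + disp) = rela->r_addend + disp;
	}
}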

--- 33 unchanged lines hidden (view full) ---

	 * bit in x14.
	 *
	 * Because addends are stored in place in the binary, RELR relocations
	 * cannot be applied idempotently. We use x24 to keep track of the
	 * currently applied displacement so that we can correctly relocate if
	 * __relocate_kernel is called twice with non-zero displacements (i.e.
	 * if there is both a physical misalignment and a KASLR displacement).
	 */
-	ldr	w9, =__relr_offset		// offset to reloc table
-	ldr	w10, =__relr_size		// size of reloc table
-	add	x9, x9, x11			// __va(.relr)
-	add	x10, x9, x10			// __va(.relr) + sizeof(.relr)
+	adr_l	x9, __relr_start
+	adr_l	x10, __relr_end

	sub	x15, x23, x24			// delta from previous offset
	cbz	x15, 7f				// nothing to do if unchanged
	mov	x24, x23			// save new offset

2:	cmp	x9, x10
	b.hs	7f
	ldr	x11, [x9], #8
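
As the comment above explains, RELR addends live at the target locations themselves, so the table cannot simply be replayed; on a second pass only the change in displacement may be added. A rough C sketch of that bookkeeping follows, using the generic RELR encoding (an even entry is an address to patch, an odd entry is a bitmap covering the next 63 words). It is an illustration, not the kernel's code: apply_relr, applied_disp and disp are hypothetical stand-ins for the roles of x24 and x23, and the kernel's own bitmap walk sits in the lines hidden below.

#include <stdint.h>

static uint64_t applied_disp;	/* displacement already folded in (role of x24) */

static void apply_relr(const uint64_t *relr, const uint64_t *relr_end,
		       uint64_t disp /* role of x23 */)
{
	uint64_t delta = disp - applied_disp;	/* sub x15, x23, x24 */
	uint64_t *place = 0;

	if (!delta)				/* cbz x15, 7f */
		return;
	applied_disp = disp;			/* mov x24, x23 */

	for (; relr < relr_end; relr++) {	/* 2: cmp x9, x10 / ldr x11, [x9], #8 */
		uint64_t entry = *relr;

		if (!(entry & 1)) {
			/* Even entry: the (link-time) address of the next word
			 * to patch; following bitmap entries start after it. */
			place = (uint64_t *)(entry + disp);
			*place++ += delta;
		} else {
			/* Odd entry: bit n set means "patch the (n-1)th word
			 * after the last address entry". */
			for (int n = 1; n < 64; n++)
				if (entry & (1ULL << n))
					place[n - 1] += delta;
			place += 63;
		}
	}
}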

--- 75 unchanged lines hidden ---