/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Code to process dynamic relocations for PPC32.
 *
 * Copyright (C) IBM Corporation, 2011.
 *	Author: Suzuki Poulose <suzuki@in.ibm.com>
 *
 *  - Based on ppc64 code - reloc_64.S
 */

#include <asm/ppc_asm.h>

/* Dynamic section table entry tags */
DT_RELA = 7			/* Tag for Elf32_Rela section */
DT_RELASZ = 8			/* Size of the Rela relocs */
DT_RELAENT = 9			/* Size of one Rela reloc entry */

STN_UNDEF = 0			/* Undefined symbol index */
STB_LOCAL = 0			/* Local binding for the symbol */

R_PPC_ADDR16_LO = 4		/* Lower half of (S+A) */
R_PPC_ADDR16_HI = 5		/* Upper half of (S+A) */
R_PPC_ADDR16_HA = 6		/* High Adjusted (S+A) */
R_PPC_RELATIVE = 22		/* Adjust by the load address (B+A) */

/*
 * r3 = desired final address
 */

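/*
 * relocate() finds its own runtime address with a bl/mflr pair,
 * converts the link-time offsets stored at p_dyn/p_rela/p_sym/p_st
 * (see the end of this file) into runtime pointers, and then walks
 * the .rela.dyn table, patching R_PPC_RELATIVE and
 * R_PPC_ADDR16_{LO,HI,HA} entries so that the image can execute at
 * the address passed in r3.
 */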
_GLOBAL(relocate)

	mflr	r0		/* Save our LR */
	bl	0f		/* Find our current runtime address */
0:	mflr	r12		/* r12 = runtime address of label 0b */
	mtlr	r0

	lwz	r11, (p_dyn - 0b)(r12)
	add	r11, r11, r12	/* runtime address of .dynamic section */
	lwz	r9, (p_rela - 0b)(r12)
	add	r9, r9, r12	/* runtime address of .rela.dyn section */
	lwz	r10, (p_st - 0b)(r12)
	add	r10, r10, r12	/* runtime address of _stext */
	lwz	r13, (p_sym - 0b)(r12)
	add	r13, r13, r12	/* runtime address of .dynsym section */

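	/*
	 * The p_* words loaded above hold link-time offsets from label 0b
	 * (see the .long definitions at the bottom of this file), so adding
	 * r12, the runtime address of 0b, converts each of them into the
	 * runtime address of the corresponding section or symbol.
	 */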
	/*
	 * Scan the dynamic section for the RELA, RELASZ and RELAENT entries
	 */
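	/*
	 * Each Elf32_Dyn entry is 8 bytes: a d_tag word at offset 0 and a
	 * d_val/d_ptr word at offset 4.  A d_tag of 0 (DT_NULL) terminates
	 * the table.  r7, r8 and r6 collect the DT_RELA, DT_RELASZ and
	 * DT_RELAENT values respectively.
	 */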
	li	r6, 0
	li	r7, 0
	li	r8, 0
1:	lwz	r5, 0(r11)	/* ELF_Dyn.d_tag */
	cmpwi	r5, 0		/* End of ELF_Dyn[] */
	beq	eodyn
	cmpwi	r5, DT_RELA
	bne	relasz
	lwz	r7, 4(r11)	/* r7 = rela.link */
	b	skip
relasz:
	cmpwi	r5, DT_RELASZ
	bne	relaent
	lwz	r8, 4(r11)	/* r8 = Total Rela relocs size */
	b	skip
relaent:
	cmpwi	r5, DT_RELAENT
	bne	skip
	lwz	r6, 4(r11)	/* r6 = Size of one Rela reloc */
skip:
	addi	r11, r11, 8
	b	1b
eodyn:				/* End of Dyn Table scan */

	/* Check if we have found all the entries */
	cmpwi	r7, 0
	beq	done
	cmpwi	r8, 0
	beq	done
	cmpwi	r6, 0
	beq	done
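	/*
	 * If we get here, DT_RELA, DT_RELASZ and DT_RELAENT were all
	 * present; a missing entry branches to done and leaves the image
	 * untouched.
	 */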


	/*
	 * Work out the current offset from the link time address of .rela
	 * section.
	 *  cur_offset[r7] = rela.run[r9] - rela.link[r7]
	 *  _stext.link[r12] = _stext.run[r10] - cur_offset[r7]
	 *  final_offset[r3] = _stext.final[r3] - _stext.link[r12]
	 */
	subf	r7, r7, r9	/* cur_offset */
	subf	r12, r7, r10
	subf	r3, r12, r3	/* final_offset */
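	/*
	 * Worked example with made-up addresses: if .rela.dyn was linked
	 * at 0xc0001000 but currently sits at 0x00401000, cur_offset is
	 * 0x40400000.  With _stext currently at 0x00400000, its link-time
	 * address works out to 0xc0000000, and for a desired final address
	 * of r3 = 0x00400000 the final_offset becomes 0x40400000, the
	 * amount added to every R_PPC_RELATIVE addend below.
	 */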

	subf	r8, r6, r8	/* relasz -= relaent (covers the first entry) */
	/*
	 * Scan through the .rela table and process each entry
	 * r9	- points to the current .rela table entry
	 * r13	- points to the symbol table
	 */
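	/*
	 * Each Elf32_Rela entry holds r_offset at byte 0, r_info at byte 4
	 * and r_addend at byte 8.  ELF32_R_SYM(r_info) is r_info >> 8 and
	 * ELF32_R_TYPE(r_info) is the low byte.
	 */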

	/*
	 * Check if the relocation is based on a symbol;
	 * r5 will hold the value of the symbol.
	 */
applyrela:
	lwz	r4, 4(r9)		/* r4 = rela.r_info */
	srwi	r5, r4, 8		/* ELF32_R_SYM(r_info) */
	cmpwi	r5, STN_UNDEF	/* sym == STN_UNDEF ? */
	beq	get_type	/* value = 0 */
	/* Find the value of the symbol at index r5 */
	slwi	r5, r5, 4		/* r5 = r5 * sizeof(Elf32_Sym) */
	add	r12, r13, r5	/* r12 = &__dyn_sym[Index] */
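	/*
	 * sizeof(Elf32_Sym) is 16, hence the shift by 4 above;
	 * st_value lives at offset 4 and st_info at offset 12.
	 */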

	/*
	 * GNU ld has a bug: for dynamic relocs based on STB_LOCAL
	 * symbols, the value should be assumed to be zero. - Alan Modra
	 */
	/* XXX: Do we need to check if we are using GNU ld ? */
	lbz	r5, 12(r12)	/* r5 = dyn_sym[Index].st_info */
	extrwi	r5, r5, 4, 24	/* r5 = ELF32_ST_BIND(r5) */
	cmpwi	r5, STB_LOCAL	/* st_value = 0, ld bug */
	beq	get_type	/* We have r5 = 0 */
	lwz	r5, 4(r12)	/* r5 = __dyn_sym[Index].st_value */

get_type:
	/* Load the relocation type into r4 */
	extrwi	r4, r4, 8, 24	/* r4 = ELF32_R_TYPE(r_info) = ((char*)r4)[3] */

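	/*
	 * In the handlers below r4 receives the link-time r_offset and r7
	 * still holds cur_offset, so r4 + r7 addresses the word (or
	 * halfword) to patch in the currently running image.
	 */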
	/* R_PPC_RELATIVE */
	cmpwi	r4, R_PPC_RELATIVE
	bne	hi16
	lwz	r4, 0(r9)	/* r_offset */
	lwz	r0, 8(r9)	/* r_addend */
	add	r0, r0, r3	/* r0 = addend + final_offset */
	stwx	r0, r4, r7	/* memory[r4+r7] = (u32)r0 */
	b	nxtrela		/* continue */

	/* R_PPC_ADDR16_HI */
hi16:
	cmpwi	r4, R_PPC_ADDR16_HI
	bne	ha16
	lwz	r4, 0(r9)	/* r_offset */
	lwz	r0, 8(r9)	/* r_addend */
	add	r0, r0, r3
	add	r0, r0, r5	/* r0 = (S+A+Offset) */
	extrwi	r0, r0, 16, 0	/* r0 = (r0 >> 16) */
	b	store_half

	/* R_PPC_ADDR16_HA */
ha16:
	cmpwi	r4, R_PPC_ADDR16_HA
	bne	lo16
	lwz	r4, 0(r9)	/* r_offset */
	lwz	r0, 8(r9)	/* r_addend */
	add	r0, r0, r3
	add	r0, r0, r5	/* r0 = (S+A+Offset) */
	extrwi	r5, r0, 1, 16	/* Extract bit 16 */
	extrwi	r0, r0, 16, 0	/* r0 = (r0 >> 16) */
	add	r0, r0, r5	/* Add it to r0 */
	b	store_half

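	/*
	 * @ha rounds the high half up when bit 0x8000 of the low half is
	 * set, i.e. #ha(x) = (x + 0x8000) >> 16, so that a later addi of
	 * the sign-extended low half still produces x.  For example (made
	 * up value), x = 0x1234abcd gives hi 0x1234 but ha 0x1235.
	 */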
	/* R_PPC_ADDR16_LO */
lo16:
	cmpwi	r4, R_PPC_ADDR16_LO
	bne	unknown_type
	lwz	r4, 0(r9)	/* r_offset */
	lwz	r0, 8(r9)	/* r_addend */
	add	r0, r0, r3
	add	r0, r0, r5	/* r0 = (S+A+Offset) */
	extrwi	r0, r0, 16, 16	/* r0 &= 0xffff */
	/* Fall through to store_half */

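	/*
	 * For the ADDR16 relocations r_offset points at a half16 field
	 * (typically the immediate operand of an instruction), so only a
	 * halfword is stored.
	 */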
	/* Store half word */
store_half:
	sthx	r0, r4, r7	/* memory[r4+r7] = (u16)r0 */

nxtrela:
	/*
	 * Flush the modified instructions from the d-cache out to main
	 * storage, and invalidate any stale copies in the i-cache.
	 *
	 * The final sync / isync is delayed until the end, since the
	 * modified instructions are not executed until we return from
	 * here.
	 */
	dcbst	r4,r7
	sync			/* Ensure the data is flushed before icbi */
	icbi	r4,r7
unknown_type:
	cmpwi	r8, 0		/* relasz <= 0 ? */
	ble	done
	add	r9, r9, r6	/* move to next entry in the .rela table */
	subf	r8, r6, r8	/* relasz -= relaent */
	b	applyrela

done:
	sync			/* Wait for the flush to finish */
	isync			/* Discard prefetched instructions */
	blr

p_dyn:		.long	__dynamic_start - 0b
p_rela:		.long	__rela_dyn_start - 0b
p_sym:		.long	__dynamic_symtab - 0b
p_st:		.long	_stext - 0b
