xref: /openbmc/linux/arch/ia64/kernel/module.c (revision 6aa7de05)
/*
 * IA-64-specific support for kernel module loader.
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Loosely based on patch by Rusty Russell.
 */

/* relocs tested so far:

   DIR64LSB
   FPTR64LSB
   GPREL22
   LDXMOV
   LTOFF22
   LTOFF22X
   LTOFF_FPTR22
   PCREL21B	(for br.call only; br.cond is not supported out of modules!)
   PCREL60B	(for brl.cond only; brl.call is not supported for modules!)
   PCREL64LSB
   SECREL32LSB
   SEGREL64LSB
 */


#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include <asm/patch.h>
#include <asm/unaligned.h>

#define ARCH_MODULE_DEBUG 0

#if ARCH_MODULE_DEBUG
# define DEBUGP printk
# define inline
#else
# define DEBUGP(fmt , a...)
#endif

#ifdef CONFIG_ITANIUM
# define USE_BRL	0
#else
# define USE_BRL	1
#endif

#define MAX_LTOFF	((uint64_t) (1 << 22))	/* max. allowable linkage-table offset */

/* Define some relocation helper macros/types: */

#define FORMAT_SHIFT	0
#define FORMAT_BITS	3
#define FORMAT_MASK	((1 << FORMAT_BITS) - 1)
#define VALUE_SHIFT	3
#define VALUE_BITS	5
#define VALUE_MASK	((1 << VALUE_BITS) - 1)

enum reloc_target_format {
	/* direct encoded formats: */
	RF_NONE = 0,
	RF_INSN14 = 1,
	RF_INSN22 = 2,
	RF_INSN64 = 3,
	RF_32MSB = 4,
	RF_32LSB = 5,
	RF_64MSB = 6,
	RF_64LSB = 7,

	/* formats that cannot be directly decoded: */
	RF_INSN60,
	RF_INSN21B,	/* imm21 form 1 */
	RF_INSN21M,	/* imm21 form 2 */
	RF_INSN21F	/* imm21 form 3 */
};

enum reloc_value_formula {
	RV_DIRECT = 4,		/* S + A */
	RV_GPREL = 5,		/* @gprel(S + A) */
	RV_LTREL = 6,		/* @ltoff(S + A) */
	RV_PLTREL = 7,		/* @pltoff(S + A) */
	RV_FPTR = 8,		/* @fptr(S + A) */
	RV_PCREL = 9,		/* S + A - P */
	RV_LTREL_FPTR = 10,	/* @ltoff(@fptr(S + A)) */
	RV_SEGREL = 11,		/* @segrel(S + A) */
	RV_SECREL = 12,		/* @secrel(S + A) */
	RV_BDREL = 13,		/* BD + A */
	RV_LTV = 14,		/* S + A (like RV_DIRECT, except frozen at static link-time) */
	RV_PCREL2 = 15,		/* S + A - P */
	RV_SPECIAL = 16,	/* various (see below) */
	RV_RSVD17 = 17,
	RV_TPREL = 18,		/* @tprel(S + A) */
	RV_LTREL_TPREL = 19,	/* @ltoff(@tprel(S + A)) */
	RV_DTPMOD = 20,		/* @dtpmod(S + A) */
	RV_LTREL_DTPMOD = 21,	/* @ltoff(@dtpmod(S + A)) */
	RV_DTPREL = 22,		/* @dtprel(S + A) */
	RV_LTREL_DTPREL = 23,	/* @ltoff(@dtprel(S + A)) */
	RV_RSVD24 = 24,
	RV_RSVD25 = 25,
	RV_RSVD26 = 26,
	RV_RSVD27 = 27
	/* 28-31 reserved for implementation-specific purposes.  */
};

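/*
 * Worked example (illustrative, not from the original source): an ia64 ELF
 * r_type value encodes both of the fields above.  R_IA64_DIR64LSB (0x27),
 * for instance, decomposes with the macros defined earlier as
 *
 *	(0x27 >> FORMAT_SHIFT) & FORMAT_MASK == 7 == RF_64LSB
 *	(0x27 >> VALUE_SHIFT)  & VALUE_MASK  == 4 == RV_DIRECT
 *
 * i.e. "store S + A as a little-endian 64-bit value", which is exactly how
 * do_reloc() below dispatches on (format, formula).
 */
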
#define N(reloc)	[R_IA64_##reloc] = #reloc

static const char *reloc_name[256] = {
	N(NONE),		N(IMM14),		N(IMM22),		N(IMM64),
	N(DIR32MSB),		N(DIR32LSB),		N(DIR64MSB),		N(DIR64LSB),
	N(GPREL22),		N(GPREL64I),		N(GPREL32MSB),		N(GPREL32LSB),
	N(GPREL64MSB),		N(GPREL64LSB),		N(LTOFF22),		N(LTOFF64I),
	N(PLTOFF22),		N(PLTOFF64I),		N(PLTOFF64MSB),		N(PLTOFF64LSB),
	N(FPTR64I),		N(FPTR32MSB),		N(FPTR32LSB),		N(FPTR64MSB),
	N(FPTR64LSB),		N(PCREL60B),		N(PCREL21B),		N(PCREL21M),
	N(PCREL21F),		N(PCREL32MSB),		N(PCREL32LSB),		N(PCREL64MSB),
	N(PCREL64LSB),		N(LTOFF_FPTR22),	N(LTOFF_FPTR64I),	N(LTOFF_FPTR32MSB),
	N(LTOFF_FPTR32LSB),	N(LTOFF_FPTR64MSB),	N(LTOFF_FPTR64LSB),	N(SEGREL32MSB),
	N(SEGREL32LSB),		N(SEGREL64MSB),		N(SEGREL64LSB),		N(SECREL32MSB),
	N(SECREL32LSB),		N(SECREL64MSB),		N(SECREL64LSB),		N(REL32MSB),
	N(REL32LSB),		N(REL64MSB),		N(REL64LSB),		N(LTV32MSB),
	N(LTV32LSB),		N(LTV64MSB),		N(LTV64LSB),		N(PCREL21BI),
	N(PCREL22),		N(PCREL64I),		N(IPLTMSB),		N(IPLTLSB),
	N(COPY),		N(LTOFF22X),		N(LDXMOV),		N(TPREL14),
	N(TPREL22),		N(TPREL64I),		N(TPREL64MSB),		N(TPREL64LSB),
	N(LTOFF_TPREL22),	N(DTPMOD64MSB),		N(DTPMOD64LSB),		N(LTOFF_DTPMOD22),
	N(DTPREL14),		N(DTPREL22),		N(DTPREL64I),		N(DTPREL32MSB),
	N(DTPREL32LSB),		N(DTPREL64MSB),		N(DTPREL64LSB),		N(LTOFF_DTPREL22)
};

#undef N

/* Opaque struct for insns, to protect against derefs. */
struct insn;

static inline uint64_t
bundle (const struct insn *insn)
{
	return (uint64_t) insn & ~0xfUL;
}

static inline int
slot (const struct insn *insn)
{
	return (uint64_t) insn & 0x3;
}

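/*
 * Example (illustrative): IA-64 code pointers address individual instruction
 * slots, not bytes.  A "struct insn *" with the value, say, 0xa000000000001232
 * refers to slot 2 of the 16-byte bundle at 0xa000000000001230:
 *
 *	bundle(insn) == (uint64_t) insn & ~0xfUL  ->  0xa000000000001230
 *	slot(insn)   == (uint64_t) insn & 0x3     ->  2
 */
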
static int
apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 1 && slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM64\n",
		       mod->name, slot(insn));
		return 0;
	}
	ia64_patch_imm64((u64) insn, val);
	return 1;
}

static int
apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 1 && slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM60\n",
		       mod->name, slot(insn));
		return 0;
	}
	if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) {
		printk(KERN_ERR "%s: value %ld out of IMM60 range\n",
			mod->name, (long) val);
		return 0;
	}
	ia64_patch_imm60((u64) insn, val);
	return 1;
}

static int
apply_imm22 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 21) >= (1 << 22)) {
		printk(KERN_ERR "%s: value %li out of IMM22 range\n",
			mod->name, (long)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x01fffcfe000UL, (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
					         | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
					         | ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */
					         | ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */));
	return 1;
}

static int
apply_imm21b (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 20) >= (1 << 21)) {
		printk(KERN_ERR "%s: value %li out of IMM21b range\n",
			mod->name, (long)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x11ffffe000UL, (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
					        | ((val & 0x0fffffUL) << 13) /* bit  0 -> 13 */));
	return 1;
}

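/*
 * A note on the range checks above: "val + (1 << 20) >= (1 << 21)" is the
 * usual branch-free test that a (sign-extended) value fits in 21 bits, i.e.
 * it is equivalent to "val < -(1 << 20) || val >= (1 << 20)".  For example,
 * val == -4 passes (-4 + 0x100000 == 0xffffc < 0x200000), while
 * val == 1 << 20 fails.  The same biased-unsigned idiom is used by
 * apply_imm60() above and gp_addressable() below.
 */
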
#if USE_BRL

struct plt_entry {
	/* Two instruction bundles in PLT. */
	unsigned char bundle[2][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	     movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	     brl.many TARGET_IP */
			0x08, 0x00, 0x00, 0xc0
		}
	}
};

static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_gp)
	    && apply_imm60(mod, (struct insn *) (plt->bundle[1] + 2),
			   (target_ip - (int64_t) plt->bundle[1]) / 16))
		return 1;
	return 0;
}

unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[1];
	long off;

	b0 = b[0]; b1 = b[1];
	off = (  ((b1 & 0x00fffff000000000UL) >> 36)		/* imm20b -> bit 0 */
	       | ((b0 >> 48) << 20) | ((b1 & 0x7fffffUL) << 36)	/* imm39 -> bit 20 */
	       | ((b1 & 0x0800000000000000UL) << 0));		/* i -> bit 59 */
	return (long) plt->bundle[1] + 16*off;
}

#else /* !USE_BRL */

struct plt_entry {
	/* Three instruction bundles in PLT. */
	unsigned char bundle[3][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	     movl r16=TARGET_IP */
			0x02, 0x00, 0x00, 0x60
		},
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	     movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x11, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MIB] nop.m 0 */
			0x60, 0x80, 0x04, 0x80, 0x03, 0x00, /*	     mov b6=r16 */
			0x60, 0x00, 0x80, 0x00		    /*	     br.few b6 */
		}
	}
};

static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_ip)
	    && apply_imm64(mod, (struct insn *) (plt->bundle[1] + 2), target_gp))
		return 1;
	return 0;
}

unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[0];

	b0 = b[0]; b1 = b[1];
	return (  ((b1 & 0x000007f000000000) >> 36)		/* imm7b -> bit 0 */
		| ((b1 & 0x07fc000000000000) >> 43)		/* imm9d -> bit 7 */
		| ((b1 & 0x0003e00000000000) >> 29)		/* imm5c -> bit 16 */
		| ((b1 & 0x0000100000000000) >> 23)		/* ic -> bit 21 */
		| ((b0 >> 46) << 22) | ((b1 & 0x7fffff) << 40)	/* imm41 -> bit 22 */
		| ((b1 & 0x0800000000000000) <<  4));		/* i -> bit 63 */
}

#endif /* !USE_BRL */

void
module_arch_freeing_init (struct module *mod)
{
	if (mod->arch.init_unw_table) {
		unw_remove_unwind_table(mod->arch.init_unw_table);
		mod->arch.init_unw_table = NULL;
	}
}

/* Have we already seen one of these relocations? */
/* FIXME: we could look in other sections, too --RR */
static int
duplicate_reloc (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		if (rela[i].r_info == rela[num].r_info && rela[i].r_addend == rela[num].r_addend)
			return 1;
	}
	return 0;
}

/* Count how many GOT entries we may need */
static unsigned int
count_gots (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not
	   time critical.  */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		      case R_IA64_LTOFF22:
		      case R_IA64_LTOFF22X:
		      case R_IA64_LTOFF64I:
		      case R_IA64_LTOFF_FPTR22:
		      case R_IA64_LTOFF_FPTR64I:
		      case R_IA64_LTOFF_FPTR32MSB:
		      case R_IA64_LTOFF_FPTR32LSB:
		      case R_IA64_LTOFF_FPTR64MSB:
		      case R_IA64_LTOFF_FPTR64LSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}

/* Count how many PLT entries we may need */
static unsigned int
count_plts (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not
	   time critical.  */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		      case R_IA64_PCREL21B:
		      case R_IA64_PLTOFF22:
		      case R_IA64_PLTOFF64I:
		      case R_IA64_PLTOFF64MSB:
		      case R_IA64_PLTOFF64LSB:
		      case R_IA64_IPLTMSB:
		      case R_IA64_IPLTLSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}

/* We need to create a function descriptor for any internal function
   which is referenced. */
static unsigned int
count_fdescs (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not time critical.  */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		      case R_IA64_FPTR64I:
		      case R_IA64_FPTR32LSB:
		      case R_IA64_FPTR32MSB:
		      case R_IA64_FPTR64LSB:
		      case R_IA64_FPTR64MSB:
		      case R_IA64_LTOFF_FPTR22:
		      case R_IA64_LTOFF_FPTR32LSB:
		      case R_IA64_LTOFF_FPTR32MSB:
		      case R_IA64_LTOFF_FPTR64I:
		      case R_IA64_LTOFF_FPTR64LSB:
		      case R_IA64_LTOFF_FPTR64MSB:
		      case R_IA64_IPLTMSB:
		      case R_IA64_IPLTLSB:
			/*
			 * Jumps to static functions sometimes go straight to their
			 * offset.  Of course, that may not be possible if the jump is
			 * from init -> core or vice versa, so we need to generate an
			 * FDESC (and PLT etc) for that.
			 */
		      case R_IA64_PCREL21B:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}

int
module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
			   struct module *mod)
{
	unsigned long core_plts = 0, init_plts = 0, gots = 0, fdescs = 0;
	Elf64_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;

	/*
	 * To store the PLTs and function-descriptors, we expand the .text section for
	 * core module-code and the .init.text section for initialization code.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s)
		if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
			mod->arch.core_plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init_plt = s;
		else if (strcmp(".got", secstrings + s->sh_name) == 0)
			mod->arch.got = s;
		else if (strcmp(".opd", secstrings + s->sh_name) == 0)
			mod->arch.opd = s;
		else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
			mod->arch.unwind = s;

	if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
		printk(KERN_ERR "%s: sections missing\n", mod->name);
		return -ENOEXEC;
	}

	/* GOT and PLTs can occur in any relocated section... */
	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		const Elf64_Rela *rels = (void *)ehdr + s->sh_offset;
		unsigned long numrels = s->sh_size/sizeof(Elf64_Rela);

		if (s->sh_type != SHT_RELA)
			continue;

		gots += count_gots(rels, numrels);
		fdescs += count_fdescs(rels, numrels);
		if (strstr(secstrings + s->sh_name, ".init"))
			init_plts += count_plts(rels, numrels);
		else
			core_plts += count_plts(rels, numrels);
	}

	mod->arch.core_plt->sh_type = SHT_NOBITS;
	mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core_plt->sh_addralign = 16;
	mod->arch.core_plt->sh_size = core_plts * sizeof(struct plt_entry);
	mod->arch.init_plt->sh_type = SHT_NOBITS;
	mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init_plt->sh_addralign = 16;
	mod->arch.init_plt->sh_size = init_plts * sizeof(struct plt_entry);
	mod->arch.got->sh_type = SHT_NOBITS;
	mod->arch.got->sh_flags = ARCH_SHF_SMALL | SHF_ALLOC;
	mod->arch.got->sh_addralign = 8;
	mod->arch.got->sh_size = gots * sizeof(struct got_entry);
	mod->arch.opd->sh_type = SHT_NOBITS;
	mod->arch.opd->sh_flags = SHF_ALLOC;
	mod->arch.opd->sh_addralign = 8;
	mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
	DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
	       __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
	       mod->arch.got->sh_size, mod->arch.opd->sh_size);
	return 0;
}

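/*
 * For reference: the mod->arch fields used throughout this file come from
 * struct mod_arch_specific in arch/ia64/include/asm/module.h.  A sketch
 * reconstructed from their usage here (see that header for the
 * authoritative definition):
 *
 *	struct mod_arch_specific {
 *		struct elf64_shdr *core_plt;	   core PLT section
 *		struct elf64_shdr *init_plt;	   init PLT section
 *		struct elf64_shdr *got;		   global offset table
 *		struct elf64_shdr *opd;		   official procedure descriptors
 *		struct elf64_shdr *unwind;	   unwind-table section
 *		unsigned long gp;		   global pointer for the module
 *		void *core_unw_table;		   unwinder handle, core text
 *		void *init_unw_table;		   unwinder handle, init text
 *		unsigned int next_got_entry;	   index of next free GOT slot
 *	};
 */
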
static inline int
in_init (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->init_layout.base < mod->init_layout.size;
}

static inline int
in_core (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->core_layout.base < mod->core_layout.size;
}

static inline int
is_internal (const struct module *mod, uint64_t value)
{
	return in_init(mod, value) || in_core(mod, value);
}

/*
 * Get gp-relative offset for the linkage-table entry of VALUE.
 */
static uint64_t
get_ltoff (struct module *mod, uint64_t value, int *okp)
{
	struct got_entry *got, *e;

	if (!*okp)
		return 0;

	got = (void *) mod->arch.got->sh_addr;
	for (e = got; e < got + mod->arch.next_got_entry; ++e)
		if (e->val == value)
			goto found;

	/* Not enough GOT entries? */
	BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size));

	e->val = value;
	++mod->arch.next_got_entry;
  found:
	return (uint64_t) e - mod->arch.gp;
}

static inline int
gp_addressable (struct module *mod, uint64_t value)
{
	return value - mod->arch.gp + MAX_LTOFF/2 < MAX_LTOFF;
}

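/*
 * In other words, gp_addressable() checks whether "value" lies inside the
 * 4MB window [gp - MAX_LTOFF/2, gp + MAX_LTOFF/2) that a signed 22-bit
 * gp-relative offset (e.g., addl) can reach.  apply_relocate_add() below
 * places gp so that this window covers as much of the module as possible.
 */
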
/* Get PC-relative PLT entry for this value.  Returns 0 on failure. */
static uint64_t
get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
{
	struct plt_entry *plt, *plt_end;
	uint64_t target_ip, target_gp;

	if (!*okp)
		return 0;

	if (in_init(mod, (uint64_t) insn)) {
		plt = (void *) mod->arch.init_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.init_plt->sh_size;
	} else {
		plt = (void *) mod->arch.core_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.core_plt->sh_size;
	}

	/* "value" is a pointer to a function-descriptor; fetch the target ip/gp from it: */
	target_ip = ((uint64_t *) value)[0];
	target_gp = ((uint64_t *) value)[1];

	/* Look for existing PLT entry. */
	while (plt->bundle[0][0]) {
		if (plt_target(plt) == target_ip)
			goto found;
		if (++plt >= plt_end)
			BUG();
	}
	*plt = ia64_plt_template;
	if (!patch_plt(mod, plt, target_ip, target_gp)) {
		*okp = 0;
		return 0;
	}
#if ARCH_MODULE_DEBUG
	if (plt_target(plt) != target_ip) {
		printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
		       __func__, target_ip, plt_target(plt));
		*okp = 0;
		return 0;
	}
#endif
  found:
	return (uint64_t) plt;
}

/* Get function descriptor for VALUE. */
static uint64_t
get_fdesc (struct module *mod, uint64_t value, int *okp)
{
	struct fdesc *fdesc = (void *) mod->arch.opd->sh_addr;

	if (!*okp)
		return 0;

	if (!value) {
		printk(KERN_ERR "%s: fdesc for zero requested!\n", mod->name);
		return 0;
	}

	if (!is_internal(mod, value))
		/*
		 * If it's not a module-local entry-point, "value" already points to a
		 * function-descriptor.
		 */
		return value;

	/* Look for existing function descriptor. */
	while (fdesc->ip) {
		if (fdesc->ip == value)
			return (uint64_t)fdesc;
		if ((uint64_t) ++fdesc >= mod->arch.opd->sh_addr + mod->arch.opd->sh_size)
			BUG();
	}

	/* Create new one */
	fdesc->ip = value;
	fdesc->gp = mod->arch.gp;
	return (uint64_t) fdesc;
}

static inline int
do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
	  Elf64_Shdr *sec, void *location)
{
	enum reloc_target_format format = (r_type >> FORMAT_SHIFT) & FORMAT_MASK;
	enum reloc_value_formula formula = (r_type >> VALUE_SHIFT) & VALUE_MASK;
	uint64_t val;
	int ok = 1;

	val = sym->st_value + addend;

	switch (formula) {
	      case RV_SEGREL:	/* segment base is arbitrarily chosen to be 0 for kernel modules */
	      case RV_DIRECT:
		break;

	      case RV_GPREL:	  val -= mod->arch.gp; break;
	      case RV_LTREL:	  val = get_ltoff(mod, val, &ok); break;
	      case RV_PLTREL:	  val = get_plt(mod, location, val, &ok); break;
	      case RV_FPTR:	  val = get_fdesc(mod, val, &ok); break;
	      case RV_SECREL:	  val -= sec->sh_addr; break;
	      case RV_LTREL_FPTR: val = get_ltoff(mod, get_fdesc(mod, val, &ok), &ok); break;

	      case RV_PCREL:
		switch (r_type) {
		      case R_IA64_PCREL21B:
			if ((in_init(mod, val) && in_core(mod, (uint64_t)location)) ||
			    (in_core(mod, val) && in_init(mod, (uint64_t)location))) {
				/*
				 * Init section may have been allocated far away from core,
				 * if the branch won't reach, then allocate a plt for it.
				 */
				uint64_t delta = ((int64_t)val - (int64_t)location) / 16;
				if (delta + (1 << 20) >= (1 << 21)) {
					val = get_fdesc(mod, val, &ok);
					val = get_plt(mod, location, val, &ok);
				}
			} else if (!is_internal(mod, val))
				val = get_plt(mod, location, val, &ok);
			/* FALL THROUGH */
		      default:
			val -= bundle(location);
			break;

		      case R_IA64_PCREL32MSB:
		      case R_IA64_PCREL32LSB:
		      case R_IA64_PCREL64MSB:
		      case R_IA64_PCREL64LSB:
			val -= (uint64_t) location;
			break;

		}
		switch (r_type) {
		      case R_IA64_PCREL60B: format = RF_INSN60; break;
		      case R_IA64_PCREL21B: format = RF_INSN21B; break;
		      case R_IA64_PCREL21M: format = RF_INSN21M; break;
		      case R_IA64_PCREL21F: format = RF_INSN21F; break;
		      default: break;
		}
		break;

	      case RV_BDREL:
		val -= (uint64_t) (in_init(mod, val) ? mod->init_layout.base : mod->core_layout.base);
		break;

	      case RV_LTV:
		/* can link-time value relocs happen here?  */
		BUG();
		break;

	      case RV_PCREL2:
		if (r_type == R_IA64_PCREL21BI) {
			if (!is_internal(mod, val)) {
				printk(KERN_ERR "%s: %s reloc against "
					"non-local symbol (%lx)\n", __func__,
					reloc_name[r_type], (unsigned long)val);
				return -ENOEXEC;
			}
			format = RF_INSN21B;
		}
		val -= bundle(location);
		break;

	      case RV_SPECIAL:
		switch (r_type) {
		      case R_IA64_IPLTMSB:
		      case R_IA64_IPLTLSB:
			val = get_fdesc(mod, get_plt(mod, location, val, &ok), &ok);
			format = RF_64LSB;
			if (r_type == R_IA64_IPLTMSB)
				format = RF_64MSB;
			break;

		      case R_IA64_SUB:
			val = addend - sym->st_value;
			format = RF_INSN64;
			break;

		      case R_IA64_LTOFF22X:
			if (gp_addressable(mod, val))
				val -= mod->arch.gp;
			else
				val = get_ltoff(mod, val, &ok);
			format = RF_INSN22;
			break;

		      case R_IA64_LDXMOV:
			if (gp_addressable(mod, val)) {
				/* turn "ld8" into "mov": */
				DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location);
				ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL);
			}
			return 0;
		      default:
			if (reloc_name[r_type])
				printk(KERN_ERR "%s: special reloc %s not supported\n",
				       mod->name, reloc_name[r_type]);
			else
				printk(KERN_ERR "%s: unknown special reloc %x\n",
				       mod->name, r_type);
			return -ENOEXEC;
		}
		break;

	      case RV_TPREL:
	      case RV_LTREL_TPREL:
	      case RV_DTPMOD:
	      case RV_LTREL_DTPMOD:
	      case RV_DTPREL:
	      case RV_LTREL_DTPREL:
		printk(KERN_ERR "%s: %s reloc not supported\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	      default:
		printk(KERN_ERR "%s: unknown reloc %x\n", mod->name, r_type);
		return -ENOEXEC;
	}

	if (!ok)
		return -ENOEXEC;

	DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __func__, location, val,
	       reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend);

	switch (format) {
	      case RF_INSN21B:	ok = apply_imm21b(mod, location, (int64_t) val / 16); break;
	      case RF_INSN22:	ok = apply_imm22(mod, location, val); break;
	      case RF_INSN64:	ok = apply_imm64(mod, location, val); break;
	      case RF_INSN60:	ok = apply_imm60(mod, location, (int64_t) val / 16); break;
	      case RF_32LSB:	put_unaligned(val, (uint32_t *) location); break;
	      case RF_64LSB:	put_unaligned(val, (uint64_t *) location); break;
	      case RF_32MSB:	/* ia64 Linux is little-endian... */
	      case RF_64MSB:	/* ia64 Linux is little-endian... */
	      case RF_INSN14:	/* must be within-module, i.e., resolved by "ld -r" */
	      case RF_INSN21M:	/* must be within-module, i.e., resolved by "ld -r" */
	      case RF_INSN21F:	/* must be within-module, i.e., resolved by "ld -r" */
		printk(KERN_ERR "%s: format %u needed by %s reloc is not supported\n",
		       mod->name, format, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	      default:
		printk(KERN_ERR "%s: relocation %s resulted in unknown format %u\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?", format);
		return -ENOEXEC;
	}
	return ok ? 0 : -ENOEXEC;
}

int
apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
		    unsigned int relsec, struct module *mod)
{
	unsigned int i, n = sechdrs[relsec].sh_size / sizeof(Elf64_Rela);
	Elf64_Rela *rela = (void *) sechdrs[relsec].sh_addr;
	Elf64_Shdr *target_sec;
	int ret;

	DEBUGP("%s: applying section %u (%u relocs) to %u\n", __func__,
	       relsec, n, sechdrs[relsec].sh_info);

	target_sec = sechdrs + sechdrs[relsec].sh_info;

	if (target_sec->sh_entsize == ~0UL)
		/*
		 * If target section wasn't allocated, we don't need to relocate it.
		 * Happens, e.g., for debug sections.
		 */
		return 0;

	if (!mod->arch.gp) {
		/*
		 * XXX Should have an arch-hook for running this after final section
		 *     addresses have been selected...
		 */
		uint64_t gp;
		if (mod->core_layout.size > MAX_LTOFF)
			/*
			 * This takes advantage of the fact that ARCH_SHF_SMALL sections
			 * get allocated at the end of the module.
			 */
			gp = mod->core_layout.size - MAX_LTOFF / 2;
		else
			gp = mod->core_layout.size / 2;
		gp = (uint64_t) mod->core_layout.base + ((gp + 7) & -8);
		mod->arch.gp = gp;
		DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
	}

	for (i = 0; i < n; i++) {
		ret = do_reloc(mod, ELF64_R_TYPE(rela[i].r_info),
			       ((Elf64_Sym *) sechdrs[symindex].sh_addr
				+ ELF64_R_SYM(rela[i].r_info)),
			       rela[i].r_addend, target_sec,
			       (void *) target_sec->sh_addr + rela[i].r_offset);
		if (ret < 0)
			return ret;
	}
	return 0;
}

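/*
 * A worked example of the gp placement above (illustrative numbers only):
 * for a 5MB module, core_layout.size (0x500000) exceeds MAX_LTOFF, so gp
 * becomes base + 0x500000 - 0x200000 == base + 0x300000.  The 4MB window
 * checked by gp_addressable() then spans [base + 0x100000, base + 0x500000),
 * which covers the small-data sections placed at the end of the module.
 * The "(gp + 7) & -8" step just rounds the offset up to an 8-byte boundary.
 */
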
/*
 * Modules contain a single unwind table which covers both the core and the init text
 * sections but since the two are not contiguous, we need to split this table up such that
 * we can register (and unregister) each "segment" separately.  Fortunately, this sounds
 * more complicated than it really is.
 */
static void
register_unwind_table (struct module *mod)
{
	struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr;
	struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start);
	struct unw_table_entry tmp, *e1, *e2, *core, *init;
	unsigned long num_init = 0, num_core = 0;

	/* First, count how many init and core unwind-table entries there are.  */
	for (e1 = start; e1 < end; ++e1)
		if (in_init(mod, e1->start_offset))
			++num_init;
		else
			++num_core;
	/*
	 * Second, sort the table such that all unwind-table entries for the init and core
	 * text sections are nicely separated.  We do this with a stupid bubble sort
	 * (unwind tables don't get ridiculously huge).
	 */
	for (e1 = start; e1 < end; ++e1) {
		for (e2 = e1 + 1; e2 < end; ++e2) {
			if (e2->start_offset < e1->start_offset) {
				tmp = *e1;
				*e1 = *e2;
				*e2 = tmp;
			}
		}
	}
	/*
	 * Third, locate the init and core segments in the unwind table:
	 */
	if (in_init(mod, start->start_offset)) {
		init = start;
		core = start + num_init;
	} else {
		core = start;
		init = start + num_core;
	}

	DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __func__,
	       mod->name, mod->arch.gp, num_init, num_core);

	/*
	 * Fourth, register both tables (if not empty).
	 */
	if (num_core > 0) {
		mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								core, core + num_core);
		DEBUGP("%s:  core: handle=%p [%p-%p)\n", __func__,
		       mod->arch.core_unw_table, core, core + num_core);
	}
	if (num_init > 0) {
		mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								init, init + num_init);
		DEBUGP("%s:  init: handle=%p [%p-%p)\n", __func__,
		       mod->arch.init_unw_table, init, init + num_init);
	}
}

int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{
	DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
	if (mod->arch.unwind)
		register_unwind_table(mod);
	return 0;
}

void
module_arch_cleanup (struct module *mod)
{
	if (mod->arch.init_unw_table)
		unw_remove_unwind_table(mod->arch.init_unw_table);
	if (mod->arch.core_unw_table)
		unw_remove_unwind_table(mod->arch.core_unw_table);
}
920