xref: /openbmc/linux/arch/ia64/kernel/module.c (revision 1da177e4)
/*
 * IA-64-specific support for kernel module loader.
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Loosely based on patch by Rusty Russell.
 */

/* relocs tested so far:

   DIR64LSB
   FPTR64LSB
   GPREL22
   LDXMOV
   LTOFF22
   LTOFF22X
   LTOFF_FPTR22
   PCREL21B	(for br.call only; br.cond is not supported out of modules!)
   PCREL60B	(for brl.cond only; brl.call is not supported for modules!)
   PCREL64LSB
   SECREL32LSB
   SEGREL64LSB
 */

#include <linux/config.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include <asm/patch.h>
#include <asm/unaligned.h>

#define ARCH_MODULE_DEBUG 0

#if ARCH_MODULE_DEBUG
# define DEBUGP printk
# define inline
#else
# define DEBUGP(fmt , a...)
#endif

#ifdef CONFIG_ITANIUM
# define USE_BRL	0
#else
# define USE_BRL	1
#endif

#define MAX_LTOFF	((uint64_t) (1 << 22))	/* max. allowable linkage-table offset */

/* Define some relocation helper macros/types: */

#define FORMAT_SHIFT	0
#define FORMAT_BITS	3
#define FORMAT_MASK	((1 << FORMAT_BITS) - 1)
#define VALUE_SHIFT	3
#define VALUE_BITS	5
#define VALUE_MASK	((1 << VALUE_BITS) - 1)

enum reloc_target_format {
	/* direct encoded formats: */
	RF_NONE = 0,
	RF_INSN14 = 1,
	RF_INSN22 = 2,
	RF_INSN64 = 3,
	RF_32MSB = 4,
	RF_32LSB = 5,
	RF_64MSB = 6,
	RF_64LSB = 7,

	/* formats that cannot be directly decoded: */
	RF_INSN60,
	RF_INSN21B,	/* imm21 form 1 */
	RF_INSN21M,	/* imm21 form 2 */
	RF_INSN21F	/* imm21 form 3 */
};

enum reloc_value_formula {
	RV_DIRECT = 4,		/* S + A */
	RV_GPREL = 5,		/* @gprel(S + A) */
	RV_LTREL = 6,		/* @ltoff(S + A) */
	RV_PLTREL = 7,		/* @pltoff(S + A) */
	RV_FPTR = 8,		/* @fptr(S + A) */
	RV_PCREL = 9,		/* S + A - P */
	RV_LTREL_FPTR = 10,	/* @ltoff(@fptr(S + A)) */
	RV_SEGREL = 11,		/* @segrel(S + A) */
	RV_SECREL = 12,		/* @secrel(S + A) */
	RV_BDREL = 13,		/* BD + A */
	RV_LTV = 14,		/* S + A (like RV_DIRECT, except frozen at static link-time) */
	RV_PCREL2 = 15,		/* S + A - P */
	RV_SPECIAL = 16,	/* various (see below) */
	RV_RSVD17 = 17,
	RV_TPREL = 18,		/* @tprel(S + A) */
	RV_LTREL_TPREL = 19,	/* @ltoff(@tprel(S + A)) */
	RV_DTPMOD = 20,		/* @dtpmod(S + A) */
	RV_LTREL_DTPMOD = 21,	/* @ltoff(@dtpmod(S + A)) */
	RV_DTPREL = 22,		/* @dtprel(S + A) */
	RV_LTREL_DTPREL = 23,	/* @ltoff(@dtprel(S + A)) */
	RV_RSVD24 = 24,
	RV_RSVD25 = 25,
	RV_RSVD26 = 26,
	RV_RSVD27 = 27
	/* 28-31 reserved for implementation-specific purposes.  */
};

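/*
 * The two enums above exploit the bit layout of the R_IA64_* reloc numbers:
 * the low FORMAT_BITS select the target format, the next VALUE_BITS select
 * the value formula.  A worked decode (reloc numbers per the IA-64 psABI):
 *
 *	R_IA64_DIR64LSB = 0x27 = 0b100111 -> format 7 (RF_64LSB),  formula 4 (RV_DIRECT)
 *	R_IA64_GPREL22  = 0x2a = 0b101010 -> format 2 (RF_INSN22), formula 5 (RV_GPREL)
 *	R_IA64_LTOFF22  = 0x32 = 0b110010 -> format 2 (RF_INSN22), formula 6 (RV_LTREL)
 *
 * do_reloc() below splits r_type exactly this way.
 */
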
#define N(reloc)	[R_IA64_##reloc] = #reloc

static const char *reloc_name[256] = {
	N(NONE),		N(IMM14),		N(IMM22),		N(IMM64),
	N(DIR32MSB),		N(DIR32LSB),		N(DIR64MSB),		N(DIR64LSB),
	N(GPREL22),		N(GPREL64I),		N(GPREL32MSB),		N(GPREL32LSB),
	N(GPREL64MSB),		N(GPREL64LSB),		N(LTOFF22),		N(LTOFF64I),
	N(PLTOFF22),		N(PLTOFF64I),		N(PLTOFF64MSB),		N(PLTOFF64LSB),
	N(FPTR64I),		N(FPTR32MSB),		N(FPTR32LSB),		N(FPTR64MSB),
	N(FPTR64LSB),		N(PCREL60B),		N(PCREL21B),		N(PCREL21M),
	N(PCREL21F),		N(PCREL32MSB),		N(PCREL32LSB),		N(PCREL64MSB),
	N(PCREL64LSB),		N(LTOFF_FPTR22),	N(LTOFF_FPTR64I),	N(LTOFF_FPTR32MSB),
	N(LTOFF_FPTR32LSB),	N(LTOFF_FPTR64MSB),	N(LTOFF_FPTR64LSB),	N(SEGREL32MSB),
	N(SEGREL32LSB),		N(SEGREL64MSB),		N(SEGREL64LSB),		N(SECREL32MSB),
	N(SECREL32LSB),		N(SECREL64MSB),		N(SECREL64LSB),		N(REL32MSB),
	N(REL32LSB),		N(REL64MSB),		N(REL64LSB),		N(LTV32MSB),
	N(LTV32LSB),		N(LTV64MSB),		N(LTV64LSB),		N(PCREL21BI),
	N(PCREL22),		N(PCREL64I),		N(IPLTMSB),		N(IPLTLSB),
	N(COPY),		N(LTOFF22X),		N(LDXMOV),		N(TPREL14),
	N(TPREL22),		N(TPREL64I),		N(TPREL64MSB),		N(TPREL64LSB),
	N(LTOFF_TPREL22),	N(DTPMOD64MSB),		N(DTPMOD64LSB),		N(LTOFF_DTPMOD22),
	N(DTPREL14),		N(DTPREL22),		N(DTPREL64I),		N(DTPREL32MSB),
	N(DTPREL32LSB),		N(DTPREL64MSB),		N(DTPREL64LSB),		N(LTOFF_DTPREL22)
};

#undef N

struct got_entry {
	uint64_t val;
};

struct fdesc {
	uint64_t ip;
	uint64_t gp;
};

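/*
 * Background note: an IA-64 function pointer does not point at code but at
 * a function descriptor like the one above, pairing the entry point (ip)
 * with the global-pointer value (gp) the callee expects.  An indirect call
 * branches through the descriptor's ip after loading gp from it, which is
 * why get_fdesc() below must manufacture descriptors for module-local
 * functions whose addresses are taken.
 */
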
/* Opaque struct for insns, to protect against derefs. */
struct insn;

static inline uint64_t
bundle (const struct insn *insn)
{
	return (uint64_t) insn & ~0xfUL;
}

static inline int
slot (const struct insn *insn)
{
	return (uint64_t) insn & 0x3;
}

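/*
 * Illustration (hypothetical address): IA-64 encodes an instruction
 * location as the 16-byte-aligned bundle address plus the slot number
 * (0-2) in the low bits, so an insn pointer of 0xa000000000001232 means
 * slot 2 of the bundle at 0xa000000000001230.  bundle() and slot() above
 * just mask the two components apart.
 */
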
static int
apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM64\n",
		       mod->name, slot(insn));
		return 0;
	}
	ia64_patch_imm64((u64) insn, val);
	return 1;
}

static int
apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM60\n",
		       mod->name, slot(insn));
		return 0;
	}
	if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) {
		printk(KERN_ERR "%s: value %ld out of IMM60 range\n", mod->name, (int64_t) val);
		return 0;
	}
	ia64_patch_imm60((u64) insn, val);
	return 1;
}

static int
apply_imm22 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 21) >= (1 << 22)) {
		printk(KERN_ERR "%s: value %li out of IMM22 range\n", mod->name, (int64_t)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x01fffcfe000UL, (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
					         | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
					         | ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */
					         | ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */));
	return 1;
}

static int
apply_imm21b (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 20) >= (1 << 21)) {
		printk(KERN_ERR "%s: value %li out of IMM21b range\n", mod->name, (int64_t)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x11ffffe000UL, (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
					        | ((val & 0x0fffffUL) << 13) /* bit  0 -> 13 */));
	return 1;
}

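/*
 * Usage sketch (hypothetical names): IA-64 branch displacements count
 * 16-byte bundles rather than bytes, so callers divide the byte offset by
 * 16 before applying the IMM21b/IMM60 formats, e.g.:
 *
 *	int64_t off = (int64_t) target - (int64_t) branch_insn;
 *	ok = apply_imm21b(mod, branch_insn, off / 16);
 *
 * The range checks above use the usual unsigned-wraparound trick: for a
 * signed n-bit field, "val + 2^(n-1) >= 2^n" rejects anything outside
 * [-2^(n-1), 2^(n-1)).
 */
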
#if USE_BRL

struct plt_entry {
	/* Two instruction bundles in PLT. */
	unsigned char bundle[2][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	     movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	     brl.many TARGET_IP */
			0x08, 0x00, 0x00, 0xc0
		}
	}
};

static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_gp)
	    && apply_imm60(mod, (struct insn *) (plt->bundle[1] + 2),
			   (target_ip - (int64_t) plt->bundle[1]) / 16))
		return 1;
	return 0;
}

unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[1];
	long off;

	b0 = b[0]; b1 = b[1];
	off = (  ((b1 & 0x00fffff000000000UL) >> 36)		/* imm20b -> bit 0 */
	       | ((b0 >> 48) << 20) | ((b1 & 0x7fffffUL) << 36)	/* imm39 -> bit 20 */
	       | ((b1 & 0x0800000000000000UL) << 0));		/* i -> bit 59 */
	return (long) plt->bundle[1] + 16*off;
}

#else /* !USE_BRL */

struct plt_entry {
	/* Three instruction bundles in PLT. */
	unsigned char bundle[3][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	     movl r16=TARGET_IP */
			0x02, 0x00, 0x00, 0x60
		},
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	     movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x11, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MIB] nop.m 0 */
			0x60, 0x80, 0x04, 0x80, 0x03, 0x00, /*	     mov b6=r16 */
			0x60, 0x00, 0x80, 0x00		    /*	     br.few b6 */
		}
	}
};

static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_ip)
	    && apply_imm64(mod, (struct insn *) (plt->bundle[1] + 2), target_gp))
		return 1;
	return 0;
}

unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[0];

	b0 = b[0]; b1 = b[1];
	return (  ((b1 & 0x000007f000000000) >> 36)		/* imm7b -> bit 0 */
		| ((b1 & 0x07fc000000000000) >> 43)		/* imm9d -> bit 7 */
		| ((b1 & 0x0003e00000000000) >> 29)		/* imm5c -> bit 16 */
		| ((b1 & 0x0000100000000000) >> 23)		/* ic -> bit 21 */
		| ((b0 >> 46) << 22) | ((b1 & 0x7fffff) << 40)	/* imm41 -> bit 22 */
		| ((b1 & 0x0800000000000000) <<  4));		/* i -> bit 63 */
}

#endif /* !USE_BRL */

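/*
 * Both PLT flavors above implement the same stub; as a sketch (not the
 * literal generated code):
 *
 *	plt_entry:
 *		movl gp = TARGET_GP	// establish the callee's global pointer
 *		br[l] TARGET_IP		// transfer to the real entry point
 *
 * The brl variant fits in two bundles because brl carries a 60-bit
 * PC-relative displacement; the first-generation Itanium cannot use brl
 * (hence USE_BRL is 0 under CONFIG_ITANIUM), so the fallback materializes
 * the full 64-bit target in r16 and branches through b6.
 */
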
void *
module_alloc (unsigned long size)
{
	if (!size)
		return NULL;
	return vmalloc(size);
}

void
module_free (struct module *mod, void *module_region)
{
	if (mod->arch.init_unw_table && module_region == mod->module_init) {
		unw_remove_unwind_table(mod->arch.init_unw_table);
		mod->arch.init_unw_table = NULL;
	}
	vfree(module_region);
}

/* Have we already seen one of these relocations? */
/* FIXME: we could look in other sections, too --RR */
static int
duplicate_reloc (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		if (rela[i].r_info == rela[num].r_info && rela[i].r_addend == rela[num].r_addend)
			return 1;
	}
	return 0;
}

/* Count how many GOT entries we may need */
static unsigned int
count_gots (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not
	   time critical.  */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		      case R_IA64_LTOFF22:
		      case R_IA64_LTOFF22X:
		      case R_IA64_LTOFF64I:
		      case R_IA64_LTOFF_FPTR22:
		      case R_IA64_LTOFF_FPTR64I:
		      case R_IA64_LTOFF_FPTR32MSB:
		      case R_IA64_LTOFF_FPTR32LSB:
		      case R_IA64_LTOFF_FPTR64MSB:
		      case R_IA64_LTOFF_FPTR64LSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}

/* Count how many PLT entries we may need */
static unsigned int
count_plts (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not
	   time critical.  */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		      case R_IA64_PCREL21B:
		      case R_IA64_PLTOFF22:
		      case R_IA64_PLTOFF64I:
		      case R_IA64_PLTOFF64MSB:
		      case R_IA64_PLTOFF64LSB:
		      case R_IA64_IPLTMSB:
		      case R_IA64_IPLTLSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}

/* We need to create a function descriptor for any internal function
   which is referenced. */
static unsigned int
count_fdescs (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not time critical.  */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		      case R_IA64_FPTR64I:
		      case R_IA64_FPTR32LSB:
		      case R_IA64_FPTR32MSB:
		      case R_IA64_FPTR64LSB:
		      case R_IA64_FPTR64MSB:
		      case R_IA64_LTOFF_FPTR22:
		      case R_IA64_LTOFF_FPTR32LSB:
		      case R_IA64_LTOFF_FPTR32MSB:
		      case R_IA64_LTOFF_FPTR64I:
		      case R_IA64_LTOFF_FPTR64LSB:
		      case R_IA64_LTOFF_FPTR64MSB:
		      case R_IA64_IPLTMSB:
		      case R_IA64_IPLTLSB:
			/*
			 * Jumps to static functions sometimes go straight to their
			 * offset.  Of course, that may not be possible if the jump is
			 * from init -> core or vice versa, so we need to generate an
			 * FDESC (and PLT etc) for that.
			 */
		      case R_IA64_PCREL21B:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}

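/*
 * The .core.plt, .init.plt, .got, and .opd sections looked up below are
 * assumed to be empty placeholders contributed by the arch module linker
 * script (arch/ia64/module.lds in this tree); module_frob_arch_sections()
 * merely resizes them.  If any is missing, the load fails with -ENOEXEC.
 */
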
int
module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
			   struct module *mod)
{
	unsigned long core_plts = 0, init_plts = 0, gots = 0, fdescs = 0;
	Elf64_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;

	/*
	 * To store the PLTs and function descriptors, we size the .core.plt section
	 * for core module code and the .init.plt section for initialization code,
	 * along with the .got and .opd placeholder sections.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s)
		if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
			mod->arch.core_plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init_plt = s;
		else if (strcmp(".got", secstrings + s->sh_name) == 0)
			mod->arch.got = s;
		else if (strcmp(".opd", secstrings + s->sh_name) == 0)
			mod->arch.opd = s;
		else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
			mod->arch.unwind = s;

	if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
		printk(KERN_ERR "%s: sections missing\n", mod->name);
		return -ENOEXEC;
	}

	/* GOT and PLTs can occur in any relocated section... */
	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		const Elf64_Rela *rels = (void *)ehdr + s->sh_offset;
		unsigned long numrels = s->sh_size/sizeof(Elf64_Rela);

		if (s->sh_type != SHT_RELA)
			continue;

		gots += count_gots(rels, numrels);
		fdescs += count_fdescs(rels, numrels);
		if (strstr(secstrings + s->sh_name, ".init"))
			init_plts += count_plts(rels, numrels);
		else
			core_plts += count_plts(rels, numrels);
	}

	mod->arch.core_plt->sh_type = SHT_NOBITS;
	mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core_plt->sh_addralign = 16;
	mod->arch.core_plt->sh_size = core_plts * sizeof(struct plt_entry);
	mod->arch.init_plt->sh_type = SHT_NOBITS;
	mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init_plt->sh_addralign = 16;
	mod->arch.init_plt->sh_size = init_plts * sizeof(struct plt_entry);
	mod->arch.got->sh_type = SHT_NOBITS;
	mod->arch.got->sh_flags = ARCH_SHF_SMALL | SHF_ALLOC;
	mod->arch.got->sh_addralign = 8;
	mod->arch.got->sh_size = gots * sizeof(struct got_entry);
	mod->arch.opd->sh_type = SHT_NOBITS;
	mod->arch.opd->sh_flags = SHF_ALLOC;
	mod->arch.opd->sh_addralign = 8;
	mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
	DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
	       __FUNCTION__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
	       mod->arch.got->sh_size, mod->arch.opd->sh_size);
	return 0;
}

static inline int
in_init (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->module_init < mod->init_size;
}

static inline int
in_core (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->module_core < mod->core_size;
}

static inline int
is_internal (const struct module *mod, uint64_t value)
{
	return in_init(mod, value) || in_core(mod, value);
}

/*
 * Get gp-relative offset for the linkage-table entry of VALUE.
 */
static uint64_t
get_ltoff (struct module *mod, uint64_t value, int *okp)
{
	struct got_entry *got, *e;

	if (!*okp)
		return 0;

	got = (void *) mod->arch.got->sh_addr;
	for (e = got; e < got + mod->arch.next_got_entry; ++e)
		if (e->val == value)
			goto found;

	/* Not enough GOT entries? */
	if (e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size))
		BUG();

	e->val = value;
	++mod->arch.next_got_entry;
  found:
	return (uint64_t) e - mod->arch.gp;
}

static inline int
gp_addressable (struct module *mod, uint64_t value)
{
	return value - mod->arch.gp + MAX_LTOFF/2 < MAX_LTOFF;
}

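/*
 * gp_addressable() is an unsigned window check: it holds iff
 * gp - MAX_LTOFF/2 <= value < gp + MAX_LTOFF/2, i.e., value lies in the
 * 4MB (2^22 byte) span reachable from gp with a 22-bit signed offset.
 * E.g., with gp = 0xa000000000200000, value = gp + 0x1fffff passes while
 * value = gp + 0x200000 does not.
 */
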
/* Get PC-relative PLT entry for this value.  Returns 0 on failure. */
static uint64_t
get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
{
	struct plt_entry *plt, *plt_end;
	uint64_t target_ip, target_gp;

	if (!*okp)
		return 0;

	if (in_init(mod, (uint64_t) insn)) {
		plt = (void *) mod->arch.init_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.init_plt->sh_size;
	} else {
		plt = (void *) mod->arch.core_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.core_plt->sh_size;
	}

	/* "value" is a pointer to a function-descriptor; fetch the target ip/gp from it: */
	target_ip = ((uint64_t *) value)[0];
	target_gp = ((uint64_t *) value)[1];

	/* Look for existing PLT entry. */
	while (plt->bundle[0][0]) {
		if (plt_target(plt) == target_ip)
			goto found;
		if (++plt >= plt_end)
			BUG();
	}
	*plt = ia64_plt_template;
	if (!patch_plt(mod, plt, target_ip, target_gp)) {
		*okp = 0;
		return 0;
	}
#if ARCH_MODULE_DEBUG
	if (plt_target(plt) != target_ip) {
		printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
		       __FUNCTION__, target_ip, plt_target(plt));
		*okp = 0;
		return 0;
	}
#endif
  found:
	return (uint64_t) plt;
}

/* Get function descriptor for VALUE. */
static uint64_t
get_fdesc (struct module *mod, uint64_t value, int *okp)
{
	struct fdesc *fdesc = (void *) mod->arch.opd->sh_addr;

	if (!*okp)
		return 0;

	if (!value) {
		printk(KERN_ERR "%s: fdesc for zero requested!\n", mod->name);
		return 0;
	}

	if (!is_internal(mod, value))
		/*
		 * If it's not a module-local entry-point, "value" already points to a
		 * function-descriptor.
		 */
		return value;

	/* Look for existing function descriptor. */
	while (fdesc->ip) {
		if (fdesc->ip == value)
			return (uint64_t)fdesc;
		if ((uint64_t) ++fdesc >= mod->arch.opd->sh_addr + mod->arch.opd->sh_size)
			BUG();
	}

	/* Create new one */
	fdesc->ip = value;
	fdesc->gp = mod->arch.gp;
	return (uint64_t) fdesc;
}

static inline int
do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
	  Elf64_Shdr *sec, void *location)
{
	enum reloc_target_format format = (r_type >> FORMAT_SHIFT) & FORMAT_MASK;
	enum reloc_value_formula formula = (r_type >> VALUE_SHIFT) & VALUE_MASK;
	uint64_t val;
	int ok = 1;

	val = sym->st_value + addend;

	switch (formula) {
	      case RV_SEGREL:	/* segment base is arbitrarily chosen to be 0 for kernel modules */
	      case RV_DIRECT:
		break;

	      case RV_GPREL:	  val -= mod->arch.gp; break;
	      case RV_LTREL:	  val = get_ltoff(mod, val, &ok); break;
	      case RV_PLTREL:	  val = get_plt(mod, location, val, &ok); break;
	      case RV_FPTR:	  val = get_fdesc(mod, val, &ok); break;
	      case RV_SECREL:	  val -= sec->sh_addr; break;
	      case RV_LTREL_FPTR: val = get_ltoff(mod, get_fdesc(mod, val, &ok), &ok); break;

	      case RV_PCREL:
		switch (r_type) {
		      case R_IA64_PCREL21B:
			if ((in_init(mod, val) && in_core(mod, (uint64_t)location)) ||
			    (in_core(mod, val) && in_init(mod, (uint64_t)location))) {
				/*
				 * The init section may have been allocated far away from
				 * the core section; if the branch won't reach, allocate a
				 * PLT entry for it.
				 */
				uint64_t delta = ((int64_t)val - (int64_t)location) / 16;
				if (delta + (1 << 20) >= (1 << 21)) {
					val = get_fdesc(mod, val, &ok);
					val = get_plt(mod, location, val, &ok);
				}
			} else if (!is_internal(mod, val))
				val = get_plt(mod, location, val, &ok);
			/* FALL THROUGH */
		      default:
			val -= bundle(location);
			break;

		      case R_IA64_PCREL32MSB:
		      case R_IA64_PCREL32LSB:
		      case R_IA64_PCREL64MSB:
		      case R_IA64_PCREL64LSB:
			val -= (uint64_t) location;
			break;
		}
		switch (r_type) {
		      case R_IA64_PCREL60B: format = RF_INSN60; break;
		      case R_IA64_PCREL21B: format = RF_INSN21B; break;
		      case R_IA64_PCREL21M: format = RF_INSN21M; break;
		      case R_IA64_PCREL21F: format = RF_INSN21F; break;
		      default: break;
		}
		break;

	      case RV_BDREL:
		val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
		break;

	      case RV_LTV:
		/* can link-time value relocs happen here?  */
		BUG();
		break;

	      case RV_PCREL2:
		if (r_type == R_IA64_PCREL21BI) {
			if (!is_internal(mod, val)) {
				printk(KERN_ERR "%s: %s reloc against non-local symbol (%lx)\n",
				       __FUNCTION__, reloc_name[r_type], val);
				return -ENOEXEC;
			}
			format = RF_INSN21B;
		}
		val -= bundle(location);
		break;

	      case RV_SPECIAL:
		switch (r_type) {
		      case R_IA64_IPLTMSB:
		      case R_IA64_IPLTLSB:
			val = get_fdesc(mod, get_plt(mod, location, val, &ok), &ok);
			format = RF_64LSB;
			if (r_type == R_IA64_IPLTMSB)
				format = RF_64MSB;
			break;

		      case R_IA64_SUB:
			val = addend - sym->st_value;
			format = RF_INSN64;
			break;

		      case R_IA64_LTOFF22X:
			if (gp_addressable(mod, val))
				val -= mod->arch.gp;
			else
				val = get_ltoff(mod, val, &ok);
			format = RF_INSN22;
			break;

		      case R_IA64_LDXMOV:
			if (gp_addressable(mod, val)) {
				/* turn "ld8" into "mov": */
				DEBUGP("%s: patching ld8 at %p to mov\n", __FUNCTION__, location);
				ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL);
			}
			return 0;

		      default:
			if (reloc_name[r_type])
				printk(KERN_ERR "%s: special reloc %s not supported\n",
				       mod->name, reloc_name[r_type]);
			else
				printk(KERN_ERR "%s: unknown special reloc %x\n",
				       mod->name, r_type);
			return -ENOEXEC;
		}
		break;

	      case RV_TPREL:
	      case RV_LTREL_TPREL:
	      case RV_DTPMOD:
	      case RV_LTREL_DTPMOD:
	      case RV_DTPREL:
	      case RV_LTREL_DTPREL:
		printk(KERN_ERR "%s: %s reloc not supported\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	      default:
		printk(KERN_ERR "%s: unknown reloc %x\n", mod->name, r_type);
		return -ENOEXEC;
	}

	if (!ok)
		return -ENOEXEC;

	DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __FUNCTION__, location, val,
	       reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend);

	switch (format) {
	      case RF_INSN21B:	ok = apply_imm21b(mod, location, (int64_t) val / 16); break;
	      case RF_INSN22:	ok = apply_imm22(mod, location, val); break;
	      case RF_INSN64:	ok = apply_imm64(mod, location, val); break;
	      case RF_INSN60:	ok = apply_imm60(mod, location, (int64_t) val / 16); break;
	      case RF_32LSB:	put_unaligned(val, (uint32_t *) location); break;
	      case RF_64LSB:	put_unaligned(val, (uint64_t *) location); break;
	      case RF_32MSB:	/* ia64 Linux is little-endian... */
	      case RF_64MSB:	/* ia64 Linux is little-endian... */
	      case RF_INSN14:	/* must be within-module, i.e., resolved by "ld -r" */
	      case RF_INSN21M:	/* must be within-module, i.e., resolved by "ld -r" */
	      case RF_INSN21F:	/* must be within-module, i.e., resolved by "ld -r" */
		printk(KERN_ERR "%s: format %u needed by %s reloc is not supported\n",
		       mod->name, format, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	      default:
		printk(KERN_ERR "%s: relocation %s resulted in unknown format %u\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?", format);
		return -ENOEXEC;
	}
	return ok ? 0 : -ENOEXEC;
}

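/*
 * A worked trace of do_reloc() (addresses hypothetical): for an
 * R_IA64_GPREL22 reloc against symbol S with addend A, the decode yields
 * formula RV_GPREL and format RF_INSN22, val becomes S + A - mod->arch.gp,
 * and apply_imm22() scatters the 22-bit result into the add-immediate at
 * "location".  An R_IA64_LTOFF22 instead routes val through get_ltoff(),
 * allocating a GOT entry on first use and patching in the gp-relative
 * offset of that entry.
 */
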
int
apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
		    unsigned int relsec, struct module *mod)
{
	unsigned int i, n = sechdrs[relsec].sh_size / sizeof(Elf64_Rela);
	Elf64_Rela *rela = (void *) sechdrs[relsec].sh_addr;
	Elf64_Shdr *target_sec;
	int ret;

	DEBUGP("%s: applying section %u (%u relocs) to %u\n", __FUNCTION__,
	       relsec, n, sechdrs[relsec].sh_info);

	target_sec = sechdrs + sechdrs[relsec].sh_info;

	if (target_sec->sh_entsize == ~0UL)
		/*
		 * If target section wasn't allocated, we don't need to relocate it.
		 * Happens, e.g., for debug sections.
		 */
		return 0;

	if (!mod->arch.gp) {
		/*
		 * XXX Should have an arch-hook for running this after final section
		 *     addresses have been selected...
		 */
		/* See if gp can cover the entire core module:  */
		uint64_t gp = (uint64_t) mod->module_core + MAX_LTOFF / 2;
		if (mod->core_size >= MAX_LTOFF)
			/*
			 * This takes advantage of the fact that ARCH_SHF_SMALL sections get
			 * allocated at the end of the module.
			 */
			gp = (uint64_t) mod->module_core + mod->core_size - MAX_LTOFF / 2;
		mod->arch.gp = gp;
		DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp);
	}
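	/*
	 * Worked example (hypothetical addresses): for a 1MB module at
	 * module_core = 0xa000000000000000, gp lands at core + 2MB, so all of
	 * the module sits inside the gp +/- 2MB window and gp_addressable()
	 * holds everywhere.  Only for core_size >= 4MB is gp biased toward
	 * the end, where the small-data (ARCH_SHF_SMALL) sections live.
	 */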

	for (i = 0; i < n; i++) {
		ret = do_reloc(mod, ELF64_R_TYPE(rela[i].r_info),
			       ((Elf64_Sym *) sechdrs[symindex].sh_addr
				+ ELF64_R_SYM(rela[i].r_info)),
			       rela[i].r_addend, target_sec,
			       (void *) target_sec->sh_addr + rela[i].r_offset);
		if (ret < 0)
			return ret;
	}
	return 0;
}

int
apply_relocate (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
		unsigned int relsec, struct module *mod)
{
	printk(KERN_ERR "module %s: REL relocs in section %u unsupported\n", mod->name, relsec);
	return -ENOEXEC;
}

/*
 * Modules contain a single unwind table which covers both the core and the init text
 * sections, but since the two are not contiguous, we need to split this table up such that
 * we can register (and unregister) each "segment" separately.  Fortunately, this sounds
 * more complicated than it really is.
 */
static void
register_unwind_table (struct module *mod)
{
	struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr;
	struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start);
	struct unw_table_entry tmp, *e1, *e2, *core, *init;
	unsigned long num_init = 0, num_core = 0;

	/* First, count how many init and core unwind-table entries there are.  */
	for (e1 = start; e1 < end; ++e1)
		if (in_init(mod, e1->start_offset))
			++num_init;
		else
			++num_core;
	/*
	 * Second, sort the table such that all unwind-table entries for the init and core
	 * text sections are nicely separated.  We do this with a stupid bubble sort
	 * (unwind tables don't get ridiculously huge).
	 */
	for (e1 = start; e1 < end; ++e1) {
		for (e2 = e1 + 1; e2 < end; ++e2) {
			if (e2->start_offset < e1->start_offset) {
				tmp = *e1;
				*e1 = *e2;
				*e2 = tmp;
			}
		}
	}
	/*
	 * Third, locate the init and core segments in the unwind table:
	 */
	if (in_init(mod, start->start_offset)) {
		init = start;
		core = start + num_init;
	} else {
		core = start;
		init = start + num_core;
	}

	DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __FUNCTION__,
	       mod->name, mod->arch.gp, num_init, num_core);

	/*
	 * Fourth, register both tables (if not empty).
	 */
	if (num_core > 0) {
		mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								core, core + num_core);
		DEBUGP("%s:  core: handle=%p [%p-%p)\n", __FUNCTION__,
		       mod->arch.core_unw_table, core, core + num_core);
	}
	if (num_init > 0) {
		mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								init, init + num_init);
		DEBUGP("%s:  init: handle=%p [%p-%p)\n", __FUNCTION__,
		       mod->arch.init_unw_table, init, init + num_init);
	}
}

int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{
	DEBUGP("%s: init: entry=%p\n", __FUNCTION__, mod->init);
	if (mod->arch.unwind)
		register_unwind_table(mod);
	return 0;
}

void
module_arch_cleanup (struct module *mod)
{
	if (mod->arch.init_unw_table)
		unw_remove_unwind_table(mod->arch.init_unw_table);
	if (mod->arch.core_unw_table)
		unw_remove_unwind_table(mod->arch.core_unw_table);
}

#ifdef CONFIG_SMP
void
percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
{
	unsigned int i;
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_possible(i))
			memcpy(pcpudst + __per_cpu_offset[i], src, size);
}
#endif /* CONFIG_SMP */
953