/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 * Copyright (C) 2013 Imagination Technologies Ltd.
 *
 * VPE support module for loading a MIPS SP program into VPE1. The SP
 * environment is rather simple since there are no TLBs. It needs
 * to be relocatable (or partially linked). Initialize your stack in
 * the startup-code. The loader looks for the symbol __start and sets
 * up the execution to resume from there. To load and run, simply cat
 * the SP 'binary' to the /dev/vpe1 device.
 */
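/*
 * A minimal usage sketch (illustrative; assumes the SP image was built
 * as a relocatable object, e.g. with "ld -r", or as a fully linked
 * ET_EXEC image, and that the /dev/vpe1 device node exists):
 *
 *	cat sp_program.elf > /dev/vpe1
 *
 * The image is parsed, relocated if necessary and started from its
 * __start symbol when the device is closed (see vpe_release()).
 */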
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/moduleloader.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/memblock.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/cacheflush.h>
#include <linux/atomic.h>
#include <asm/mips_mt.h>
#include <asm/processor.h>
#include <asm/vpe.h>

#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))

struct vpe_control vpecontrol = {
	.vpe_list_lock	= __SPIN_LOCK_UNLOCKED(vpe_list_lock),
	.vpe_list	= LIST_HEAD_INIT(vpecontrol.vpe_list),
	.tc_list_lock	= __SPIN_LOCK_UNLOCKED(tc_list_lock),
	.tc_list	= LIST_HEAD_INIT(vpecontrol.tc_list)
};

/* get the vpe associated with this minor */
struct vpe *get_vpe(int minor)
{
	struct vpe *res, *v;

	if (!cpu_has_mipsmt)
		return NULL;

	res = NULL;
	spin_lock(&vpecontrol.vpe_list_lock);
	list_for_each_entry(v, &vpecontrol.vpe_list, list) {
		if (v->minor == VPE_MODULE_MINOR) {
			res = v;
			break;
		}
	}
	spin_unlock(&vpecontrol.vpe_list_lock);

	return res;
}

/* get the tc associated with this index */
struct tc *get_tc(int index)
{
	struct tc *res, *t;

	res = NULL;
	spin_lock(&vpecontrol.tc_list_lock);
	list_for_each_entry(t, &vpecontrol.tc_list, list) {
		if (t->index == index) {
			res = t;
			break;
		}
	}
	spin_unlock(&vpecontrol.tc_list_lock);

	return res;
}

/* allocate a vpe and associate it with this minor (or index) */
struct vpe *alloc_vpe(int minor)
{
	struct vpe *v;

	v = kzalloc(sizeof(struct vpe), GFP_KERNEL);
	if (v == NULL)
		goto out;

	INIT_LIST_HEAD(&v->tc);
	spin_lock(&vpecontrol.vpe_list_lock);
	list_add_tail(&v->list, &vpecontrol.vpe_list);
	spin_unlock(&vpecontrol.vpe_list_lock);

	INIT_LIST_HEAD(&v->notify);
	v->minor = VPE_MODULE_MINOR;

out:
	return v;
}

/* allocate a tc. At startup only tc0 is running, all others can be halted. */
struct tc *alloc_tc(int index)
{
	struct tc *tc;

	tc = kzalloc(sizeof(struct tc), GFP_KERNEL);
	if (tc == NULL)
		goto out;

	INIT_LIST_HEAD(&tc->tc);
	tc->index = index;

	spin_lock(&vpecontrol.tc_list_lock);
	list_add_tail(&tc->list, &vpecontrol.tc_list);
	spin_unlock(&vpecontrol.tc_list_lock);

out:
	return tc;
}

/* clean up and free everything */
void release_vpe(struct vpe *v)
{
	list_del(&v->list);
	if (v->load_addr)
		release_progmem(v->load_addr);
	kfree(v);
}

/* Find some VPE program space */
void *alloc_progmem(unsigned long len)
{
	void *addr;

#ifdef CONFIG_MIPS_VPE_LOADER_TOM
	/*
	 * This means you must tell Linux to use less memory than you
	 * physically have, for example by passing a mem= boot argument.
	 */
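	/*
	 * For example (figures purely illustrative): on a board with 128MB
	 * of RAM, booting with mem=112M leaves the top 16MB unused by
	 * Linux; pfn_to_kaddr(max_low_pfn) then points at the start of
	 * that spare region, which is handed to the SP program.
	 */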
	addr = pfn_to_kaddr(max_low_pfn);
	memset(addr, 0, len);
#else
	/* simply grab some memory for now */
	addr = kzalloc(len, GFP_KERNEL);
#endif

	return addr;
}

void release_progmem(void *ptr)
{
#ifndef CONFIG_MIPS_VPE_LOADER_TOM
	kfree(ptr);
#endif
}

/* Update size with this section: return offset. */
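/*
 * A quick worked example: with *size == 10 and a section whose
 * sh_addralign is 8 and sh_size is 4, the returned offset is 16 and
 * *size is updated to 20.
 */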
static long get_offset(unsigned long *size, Elf_Shdr *sechdr)
{
	long ret;

	ret = ALIGN(*size, sechdr->sh_addralign ? : 1);
	*size = ret + sechdr->sh_size;
	return ret;
}

/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
   might -- code, read-only data, read-write data, small data.	Tally
   sizes, and place the offsets into sh_entsize fields: high bit means it
   belongs in init. */
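/*
 * In other words (sketch of the resulting image layout): executable
 * sections first, then read-only data, then writable data, then small
 * data, each aligned as required; sh_entsize is reused here purely as
 * scratch space to remember each section's offset into that image.
 */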
static void layout_sections(struct module *mod, const Elf_Ehdr *hdr,
			    Elf_Shdr *sechdrs, const char *secstrings)
{
	static unsigned long const masks[][2] = {
		/* NOTE: all executable code must be the first section
		 * in this array; otherwise modify the text_size
		 * finder in the two loops below */
		{SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL},
		{SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL},
		{SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL},
		{ARCH_SHF_SMALL | SHF_ALLOC, 0}
	};
	unsigned int m, i;

	for (i = 0; i < hdr->e_shnum; i++)
		sechdrs[i].sh_entsize = ~0UL;

	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < hdr->e_shnum; ++i) {
			Elf_Shdr *s = &sechdrs[i];
			struct module_memory *mod_mem;

			mod_mem = &mod->mem[MOD_TEXT];

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL)
				continue;
			s->sh_entsize =
				get_offset((unsigned long *)&mod_mem->size, s);
		}
	}
}

/* from module-elf32.c, but subverted a little */

struct mips_hi16 {
	struct mips_hi16 *next;
	Elf32_Addr *addr;
	Elf32_Addr value;
};

static struct mips_hi16 *mips_hi16_list;
static unsigned int gp_offs, gp_addr;

static int apply_r_mips_none(struct module *me, uint32_t *location,
			     Elf32_Addr v)
{
	return 0;
}

static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
				Elf32_Addr v)
{
	int rel;

	if (!(*location & 0xffff)) {
		rel = (int)v - gp_addr;
	} else {
		/* .sbss + gp(relative) + offset */
		/* kludge! */
		rel =  (int)(short)((int)v + gp_offs +
				    (int)(short)(*location & 0xffff) - gp_addr);
	}

	if ((rel > 32768) || (rel < -32768)) {
		pr_debug("VPE loader: apply_r_mips_gprel16: relative address 0x%x out of range of gp register\n",
			 rel);
		return -ENOEXEC;
	}

	*location = (*location & 0xffff0000) | (rel & 0xffff);

	return 0;
}

static int apply_r_mips_pc16(struct module *me, uint32_t *location,
			     Elf32_Addr v)
{
	int rel;
	rel = (((unsigned int)v - (unsigned int)location));
	rel >>= 2; /* because the offset is in _instructions_ not bytes. */
	rel -= 1;  /* and one instruction less due to the branch delay slot. */
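	/*
	 * For instance, a branch at 0x100 targeting 0x110 yields
	 * (0x110 - 0x100) >> 2 = 4, minus 1 for the delay slot, so the
	 * encoded offset is 3 (the CPU adds it to the delay-slot PC).
	 */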

	if ((rel > 32768) || (rel < -32768)) {
		pr_debug("VPE loader: apply_r_mips_pc16: relative address out of range 0x%x\n",
			 rel);
		return -ENOEXEC;
	}

	*location = (*location & 0xffff0000) | (rel & 0xffff);

	return 0;
}

static int apply_r_mips_32(struct module *me, uint32_t *location,
			   Elf32_Addr v)
{
	*location += v;

	return 0;
}

static int apply_r_mips_26(struct module *me, uint32_t *location,
			   Elf32_Addr v)
{
	if (v % 4) {
		pr_debug("VPE loader: apply_r_mips_26: unaligned relocation\n");
		return -ENOEXEC;
	}

/*
 * Not desperately convinced this is a good check of an overflow condition
 * anyway. But it gets in the way of handling undefined weak symbols which
 * we want to set to zero.
 * if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
 * printk(KERN_ERR
 * "module %s: relocation overflow\n",
 * me->name);
 * return -ENOEXEC;
 * }
 */

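	/*
	 * Add the (word) value into the 26-bit jump index field while
	 * preserving the opcode bits; the existing field already holds
	 * the addend for this REL-style relocation.
	 */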
	*location = (*location & ~0x03ffffff) |
		((*location + (v >> 2)) & 0x03ffffff);
	return 0;
}

static int apply_r_mips_hi16(struct module *me, uint32_t *location,
			     Elf32_Addr v)
{
	struct mips_hi16 *n;

	/*
	 * We cannot relocate this one now because we don't know the value of
	 * the carry we need to add.  Save the information, and let LO16 do the
	 * actual relocation.
	 */
	n = kmalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->addr = location;
	n->value = v;
	n->next = mips_hi16_list;
	mips_hi16_list = n;

	return 0;
}

static int apply_r_mips_lo16(struct module *me, uint32_t *location,
			     Elf32_Addr v)
{
	unsigned long insnlo = *location;
	Elf32_Addr val, vallo;
	struct mips_hi16 *l, *next;

	/* Sign extend the addend we extract from the lo insn.	*/
	vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;

	if (mips_hi16_list != NULL) {

		l = mips_hi16_list;
		while (l != NULL) {
			unsigned long insn;

			/*
			 * The value for the HI16 had best be the same.
			 */
			if (v != l->value) {
				pr_debug("VPE loader: apply_r_mips_lo16/hi16: inconsistent value information\n");
				goto out_free;
			}

			/*
			 * Do the HI16 relocation.  Note that we actually don't
			 * need to know anything about the LO16 itself, except
			 * where to find the low 16 bits of the addend needed
			 * by the LO16.
			 */
			insn = *l->addr;
			val = ((insn & 0xffff) << 16) + vallo;
			val += v;

			/*
			 * Account for the sign extension that will happen in
			 * the low bits.
			 */
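			/*
			 * Example: for a final value of 0x00018000 the LO16
			 * addend 0x8000 sign-extends to -0x8000 at run time,
			 * so the HI16 half must become 0x0002 (one more than
			 * the plain upper half) for lui/addiu to add back up
			 * to 0x00018000.
			 */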
			val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;

			insn = (insn & ~0xffff) | val;
			*l->addr = insn;

			next = l->next;
			kfree(l);
			l = next;
		}

		mips_hi16_list = NULL;
	}

	/*
	 * Ok, we're done with the HI16 relocs.	 Now deal with the LO16.
	 */
	val = v + vallo;
	insnlo = (insnlo & ~0xffff) | (val & 0xffff);
	*location = insnlo;

	return 0;

out_free:
	while (l != NULL) {
		next = l->next;
		kfree(l);
		l = next;
	}
	mips_hi16_list = NULL;

	return -ENOEXEC;
}

static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
				Elf32_Addr v) = {
	[R_MIPS_NONE]	= apply_r_mips_none,
	[R_MIPS_32]	= apply_r_mips_32,
	[R_MIPS_26]	= apply_r_mips_26,
	[R_MIPS_HI16]	= apply_r_mips_hi16,
	[R_MIPS_LO16]	= apply_r_mips_lo16,
	[R_MIPS_GPREL16] = apply_r_mips_gprel16,
	[R_MIPS_PC16] = apply_r_mips_pc16
};

static char *rstrs[] = {
	[R_MIPS_NONE]	= "MIPS_NONE",
	[R_MIPS_32]	= "MIPS_32",
	[R_MIPS_26]	= "MIPS_26",
	[R_MIPS_HI16]	= "MIPS_HI16",
	[R_MIPS_LO16]	= "MIPS_LO16",
	[R_MIPS_GPREL16] = "MIPS_GPREL16",
	[R_MIPS_PC16] = "MIPS_PC16"
};

static int apply_relocations(Elf32_Shdr *sechdrs,
		      const char *strtab,
		      unsigned int symindex,
		      unsigned int relsec,
		      struct module *me)
{
	Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;
	unsigned int i;
	Elf32_Addr v;
	int res;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		Elf32_Word r_info = rel[i].r_info;

		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(r_info);

		if (!sym->st_value) {
			pr_debug("%s: undefined weak symbol %s\n",
				 me->name, strtab + sym->st_name);
			/* just print the warning, don't barf */
		}

		v = sym->st_value;

		res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
		if (res) {
			char *r = rstrs[ELF32_R_TYPE(r_info)];
			pr_warn("VPE loader: .text+0x%x relocation type %s for symbol \"%s\" failed\n",
				rel[i].r_offset, r ? r : "UNKNOWN",
				strtab + sym->st_name);
			return res;
		}
	}

	return 0;
}

static inline void save_gp_address(unsigned int secbase, unsigned int rel)
{
	gp_addr = secbase + rel;
	gp_offs = gp_addr - (secbase & 0xffff0000);
}
/* end module-elf32.c */

/* Change all symbols so that st_value encodes the pointer directly. */
static void simplify_symbols(Elf_Shdr *sechdrs,
			    unsigned int symindex,
			    const char *strtab,
			    const char *secstrings,
			    unsigned int nsecs, struct module *mod)
{
	Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
	unsigned long secbase, bssbase = 0;
	unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
	int size;

	/* find the .bss section for COMMON symbols */
	for (i = 0; i < nsecs; i++) {
		if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) {
			bssbase = sechdrs[i].sh_addr;
			break;
		}
	}

	for (i = 1; i < n; i++) {
		switch (sym[i].st_shndx) {
		case SHN_COMMON:
			/* Allocate space for the symbol in the .bss section.
			   st_value is currently size.
			   We want it to have the address of the symbol. */
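			/* e.g. if st_value (the size, per the comment above)
			   is 16, the symbol is given the current bssbase as
			   its address and bssbase then advances by 16; note
			   that no extra alignment is applied here. */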

			size = sym[i].st_value;
			sym[i].st_value = bssbase;

			bssbase += size;
			break;

		case SHN_ABS:
			/* Don't need to do anything */
			break;

		case SHN_UNDEF:
			/* ret = -ENOENT; */
			break;

		case SHN_MIPS_SCOMMON:
			pr_debug("simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n",
				 strtab + sym[i].st_name, sym[i].st_shndx);
			/* .sbss section */
			break;

		default:
			secbase = sechdrs[sym[i].st_shndx].sh_addr;

			if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0)
				save_gp_address(secbase, sym[i].st_value);

			sym[i].st_value += secbase;
			break;
		}
	}
}

#ifdef DEBUG_ELFLOADER
static void dump_elfsymbols(Elf_Shdr *sechdrs, unsigned int symindex,
			    const char *strtab, struct module *mod)
{
	Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
	unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);

	pr_debug("dump_elfsymbols: n %d\n", n);
	for (i = 1; i < n; i++) {
		pr_debug(" i %d name <%s> 0x%x\n", i, strtab + sym[i].st_name,
			 sym[i].st_value);
	}
}
#endif

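/*
 * Find the two magic symbols exported by the SP program: __start, the
 * entry point, and vpe_shared, the (optional) pointer exposed to other
 * kernel code through vpe_get_shared().
 */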
static int find_vpe_symbols(struct vpe *v, Elf_Shdr *sechdrs,
				      unsigned int symindex, const char *strtab,
				      struct module *mod)
{
	Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
	unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);

	for (i = 1; i < n; i++) {
		if (strcmp(strtab + sym[i].st_name, "__start") == 0)
			v->__start = sym[i].st_value;

		if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0)
			v->shared_ptr = (void *)sym[i].st_value;
	}

	if ((v->__start == 0) || (v->shared_ptr == NULL))
		return -1;

	return 0;
}

/*
 * Allocates a VPE with some program code space (the load address), copies
 * the contents of the program (p)buffer performing relocations etc., and
 * frees it when finished.
 */
static int vpe_elfload(struct vpe *v)
{
	Elf_Ehdr *hdr;
	Elf_Shdr *sechdrs;
	long err = 0;
	char *secstrings, *strtab = NULL;
	unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
	struct module mod; /* so we can re-use the relocations code */

	memset(&mod, 0, sizeof(struct module));
	strcpy(mod.name, "VPE loader");

	hdr = (Elf_Ehdr *) v->pbuffer;
	len = v->plen;

	/* Sanity checks against insmoding binaries or wrong arch,
	   weird elf version */
	if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
	    || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
	    || !elf_check_arch(hdr)
	    || hdr->e_shentsize != sizeof(*sechdrs)) {
		pr_warn("VPE loader: program wrong arch or weird elf version\n");

		return -ENOEXEC;
	}

	if (hdr->e_type == ET_REL)
		relocate = 1;

	if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
		pr_err("VPE loader: program length %u truncated\n", len);

		return -ENOEXEC;
	}

	/* Convenience variables */
	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	sechdrs[0].sh_addr = 0;

	/* And these should exist, but gcc whinges if we don't init them */
	symindex = strindex = 0;

	if (relocate) {
		for (i = 1; i < hdr->e_shnum; i++) {
			if ((sechdrs[i].sh_type != SHT_NOBITS) &&
			    (len < sechdrs[i].sh_offset + sechdrs[i].sh_size)) {
				pr_err("VPE program length %u truncated\n",
				       len);
				return -ENOEXEC;
			}

			/* Mark all sections sh_addr with their address in the
			   temporary image. */
			sechdrs[i].sh_addr = (size_t) hdr +
				sechdrs[i].sh_offset;

			/* Internal symbols and strings. */
			if (sechdrs[i].sh_type == SHT_SYMTAB) {
				symindex = i;
				strindex = sechdrs[i].sh_link;
				strtab = (char *)hdr +
					sechdrs[strindex].sh_offset;
			}
		}
		layout_sections(&mod, hdr, sechdrs, secstrings);
	}

	v->load_addr = alloc_progmem(mod.mem[MOD_TEXT].size);
	if (!v->load_addr)
		return -ENOMEM;

	pr_info("VPE loader: loading to %p\n", v->load_addr);

	if (relocate) {
		for (i = 0; i < hdr->e_shnum; i++) {
			void *dest;

			if (!(sechdrs[i].sh_flags & SHF_ALLOC))
				continue;

			dest = v->load_addr + sechdrs[i].sh_entsize;

			if (sechdrs[i].sh_type != SHT_NOBITS)
				memcpy(dest, (void *)sechdrs[i].sh_addr,
				       sechdrs[i].sh_size);
			/* Update sh_addr to point to copy in image. */
			sechdrs[i].sh_addr = (unsigned long)dest;

			pr_debug(" section sh_name %s sh_addr 0x%x\n",
				 secstrings + sechdrs[i].sh_name,
				 sechdrs[i].sh_addr);
		}

		/* Fix up syms, so that st_value is a pointer to location. */
		simplify_symbols(sechdrs, symindex, strtab, secstrings,
				 hdr->e_shnum, &mod);

		/* Now do relocations. */
		for (i = 1; i < hdr->e_shnum; i++) {
			const char *strtab = (char *)sechdrs[strindex].sh_addr;
			unsigned int info = sechdrs[i].sh_info;

			/* Not a valid relocation section? */
			if (info >= hdr->e_shnum)
				continue;

			/* Don't bother with non-allocated sections */
			if (!(sechdrs[info].sh_flags & SHF_ALLOC))
				continue;

			if (sechdrs[i].sh_type == SHT_REL)
				err = apply_relocations(sechdrs, strtab,
							symindex, i, &mod);
			else if (sechdrs[i].sh_type == SHT_RELA)
				err = apply_relocate_add(sechdrs, strtab,
							 symindex, i, &mod);
			if (err < 0)
				return err;

		}
	} else {
		struct elf_phdr *phdr = (struct elf_phdr *)
						((char *)hdr + hdr->e_phoff);

		for (i = 0; i < hdr->e_phnum; i++) {
			if (phdr->p_type == PT_LOAD) {
				memcpy((void *)phdr->p_paddr,
				       (char *)hdr + phdr->p_offset,
				       phdr->p_filesz);
				memset((void *)phdr->p_paddr + phdr->p_filesz,
				       0, phdr->p_memsz - phdr->p_filesz);
			}
			phdr++;
		}

		for (i = 0; i < hdr->e_shnum; i++) {
			/* Internal symbols and strings. */
			if (sechdrs[i].sh_type == SHT_SYMTAB) {
				symindex = i;
				strindex = sechdrs[i].sh_link;
				strtab = (char *)hdr +
					sechdrs[strindex].sh_offset;

				/*
				 * mark symtab's address for when we try
				 * to find the magic symbols
				 */
				sechdrs[i].sh_addr = (size_t) hdr +
					sechdrs[i].sh_offset;
			}
		}
	}

	/* make sure it's physically written out */
	flush_icache_range((unsigned long)v->load_addr,
			   (unsigned long)v->load_addr + v->len);

	if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
		if (v->__start == 0) {
			pr_warn("VPE loader: program does not contain a __start symbol\n");
			return -ENOEXEC;
		}

		if (v->shared_ptr == NULL)
			pr_warn("VPE loader: program does not contain vpe_shared symbol.\n"
				" Unable to use AMVP (AP/SP) facilities.\n");
	}

	pr_info(" elf loaded\n");
	return 0;
}

/* checks that the VPE is unused and gets ready to load a program */
static int vpe_open(struct inode *inode, struct file *filp)
{
	enum vpe_state state;
	struct vpe_notifications *notifier;
	struct vpe *v;

	if (VPE_MODULE_MINOR != iminor(inode)) {
		/* assume only 1 device at the moment. */
		pr_warn("VPE loader: only vpe1 is supported\n");

		return -ENODEV;
	}

	v = get_vpe(aprp_cpu_index());
	if (v == NULL) {
		pr_warn("VPE loader: unable to get vpe\n");

		return -ENODEV;
	}

	state = xchg(&v->state, VPE_STATE_INUSE);
	if (state != VPE_STATE_UNUSED) {
		pr_debug("VPE loader: tc in use dumping regs\n");

		list_for_each_entry(notifier, &v->notify, list)
			notifier->stop(aprp_cpu_index());

		release_progmem(v->load_addr);
		cleanup_tc(get_tc(aprp_cpu_index()));
	}

	/* this of course trashes what was there before... */
	v->pbuffer = vmalloc(P_SIZE);
	if (!v->pbuffer) {
		pr_warn("VPE loader: unable to allocate memory\n");
		return -ENOMEM;
	}
	v->plen = P_SIZE;
	v->load_addr = NULL;
	v->len = 0;
	v->shared_ptr = NULL;
	v->__start = 0;

	return 0;
}

static int vpe_release(struct inode *inode, struct file *filp)
{
#ifdef CONFIG_MIPS_VPE_LOADER_MT
	struct vpe *v;
	Elf_Ehdr *hdr;
	int ret = 0;

	v = get_vpe(aprp_cpu_index());
	if (v == NULL)
		return -ENODEV;

	hdr = (Elf_Ehdr *) v->pbuffer;
	if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
		if (vpe_elfload(v) >= 0) {
			vpe_run(v);
		} else {
			pr_warn("VPE loader: ELF load failed.\n");
			ret = -ENOEXEC;
		}
	} else {
		pr_warn("VPE loader: only elf files are supported\n");
		ret = -ENOEXEC;
	}

	/* It's good to be able to run the SP and, if it chokes, have a look
	   at the /dev/rt?. But if we reset the pointer to the shared struct
	   we lose what has happened. So perhaps if garbage is sent to the
	   vpe device, use it as a trigger for the reset. Hopefully a nice
	   executable will be along shortly. */
	if (ret < 0)
		v->shared_ptr = NULL;

	vfree(v->pbuffer);
	v->plen = 0;

	return ret;
#else
	pr_warn("VPE loader: ELF load failed.\n");
	return -ENOEXEC;
#endif
}

static ssize_t vpe_write(struct file *file, const char __user *buffer,
			 size_t count, loff_t *ppos)
{
	size_t ret = count;
	struct vpe *v;

	if (iminor(file_inode(file)) != VPE_MODULE_MINOR)
		return -ENODEV;

	v = get_vpe(aprp_cpu_index());

	if (v == NULL)
		return -ENODEV;

	if ((count + v->len) > v->plen) {
		pr_warn("VPE loader: elf size too big. Perhaps strip unneeded symbols\n");
		return -ENOMEM;
	}

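	/*
	 * copy_from_user() returns the number of bytes it could NOT copy,
	 * so count ends up as the number of bytes actually copied; bail
	 * out only if nothing at all made it across.
	 */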
	count -= copy_from_user(v->pbuffer + v->len, buffer, count);
	if (!count)
		return -EFAULT;

	v->len += count;
	return ret;
}

const struct file_operations vpe_fops = {
	.owner = THIS_MODULE,
	.open = vpe_open,
	.release = vpe_release,
	.write = vpe_write,
	.llseek = noop_llseek,
};

void *vpe_get_shared(int index)
{
	struct vpe *v = get_vpe(index);

	if (v == NULL)
		return NULL;

	return v->shared_ptr;
}
EXPORT_SYMBOL(vpe_get_shared);

int vpe_notify(int index, struct vpe_notifications *notify)
{
	struct vpe *v = get_vpe(index);

	if (v == NULL)
		return -1;

	list_add(&notify->list, &v->notify);
	return 0;
}
EXPORT_SYMBOL(vpe_notify);

module_init(vpe_module_init);
module_exit(vpe_module_exit);
MODULE_DESCRIPTION("MIPS VPE Loader");
MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
MODULE_LICENSE("GPL");