/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/kexec.h>
#include <asm/ptrace.h>
#include <asm/mmu.h>
#include <asm/export.h>

	.text

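/*
 * call_do_softirq(void *sp) / call_do_irq(struct pt_regs *regs, void *sp):
 * switch onto the given irq stack (the stdu parks the old r1 at the top
 * of the new stack), call the handler, then switch back via ld r1,0(r1).
 */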
_GLOBAL(call_do_softirq)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3
	bl	__do_softirq
	ld	r1,0(r1)
	ld	r0,16(r1)
	mtlr	r0
	blr

_GLOBAL(call_do_irq)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
	mr	r1,r4
	bl	__do_irq
	ld	r1,0(r1)
	ld	r0,16(r1)
	mtlr	r0
	blr

	.section	".toc","aw"
PPC64_CACHES:
	.tc		ppc64_caches[TC],ppc64_caches
	.section	".text"

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 *
 *   flush all bytes from start through stop-1 inclusive
 */

_GLOBAL_TOC(flush_icache_range)
BEGIN_FTR_SECTION
	PURGE_PREFETCHED_INS
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 * and in some cases i-cache and d-cache line sizes differ from
 * each other.
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1BLOCKSIZE(r10)	/* Get cache block size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGBLOCKSIZE(r10)	/* Get log-2 of cache block size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
1:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	1b
	sync

/* Now invalidate the instruction cache */

	lwz	r7,ICACHEL1BLOCKSIZE(r10)	/* Get Icache block size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5
	lwz	r9,ICACHEL1LOGBLOCKSIZE(r10)	/* Get log-2 of Icache block size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
2:	icbi	0,r6
	add	r6,r6,r7
	bdnz	2b
	isync
	blr
_ASM_NOKPROBE_SYMBOL(flush_icache_range)
EXPORT_SYMBOL(flush_icache_range)
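
/*
 * Typical C-level use after storing an instruction (sketch only;
 * 'p' and 'new_insn' are placeholders):
 *
 *	*p = new_insn;
 *	flush_icache_range((unsigned long)p, (unsigned long)p + 4);
 */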

/*
 * Like above, but only do the D-cache.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 *
 *    flush all bytes from start to stop-1 inclusive
 */
_GLOBAL_TOC(flush_dcache_range)

/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1BLOCKSIZE(r10)	/* Get dcache block size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGBLOCKSIZE(r10)	/* Get log-2 of dcache block size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
0:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	blr
EXPORT_SYMBOL(flush_dcache_range)
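
/*
 * Typical C-level use before handing a buffer to a non-snooping
 * device (sketch only; 'buf' and 'len' are placeholders):
 *
 *	flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 */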

/*
 * Like above, but works on non-mapped physical addresses.
 * Use only for non-LPAR setups!  It also assumes real mode
 * is cacheable.  Used for flushing out the DART before using
 * it as uncacheable memory.
 *
 * flush_dcache_phys_range(unsigned long start, unsigned long stop)
 *
 *    flush all bytes from start to stop-1 inclusive
 */
_GLOBAL(flush_dcache_phys_range)
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1BLOCKSIZE(r10)	/* Get dcache block size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGBLOCKSIZE(r10)	/* Get log-2 of dcache block size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mfmsr	r5			/* Disable MMU Data Relocation */
	ori	r0,r5,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	mtctr	r8
0:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	isync
	mtmsr	r5			/* Re-enable MMU Data Relocation */
	sync
	isync
	blr
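
/*
 * Hypothetical use while initialising the DART (sketch only;
 * 'dart_phys' and 'dart_size' are placeholders):
 *
 *	flush_dcache_phys_range(dart_phys, dart_phys + dart_size);
 */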
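
/*
 * Flush and invalidate the given data cache range, so that later
 * loads fetch fresh data from memory (dcbf both writes back and
 * invalidates each block).
 *
 * flush_inval_dcache_range(unsigned long start, unsigned long stop)
 */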
_GLOBAL(flush_inval_dcache_range)
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1BLOCKSIZE(r10)	/* Get dcache block size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGBLOCKSIZE(r10)	/* Get log-2 of dcache block size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	sync
	isync
	mtctr	r8
0:	dcbf	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	isync
	blr


/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */

BEGIN_FTR_SECTION
	PURGE_PREFETCHED_INS
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)

/* Flush the dcache */
	ld	r7,PPC64_CACHES@toc(r2)
	clrrdi	r3,r3,PAGE_SHIFT		/* Page align */
	lwz	r4,DCACHEL1BLOCKSPERPAGE(r7)	/* Get # dcache blocks per page */
	lwz	r5,DCACHEL1BLOCKSIZE(r7)	/* Get dcache block size */
	mr	r6,r3
	mtctr	r4
0:	dcbst	0,r6
	add	r6,r6,r5
	bdnz	0b
	sync

/* Now invalidate the icache */

	lwz	r4,ICACHEL1BLOCKSPERPAGE(r7)	/* Get # icache blocks per page */
	lwz	r5,ICACHEL1BLOCKSIZE(r7)	/* Get icache block size */
	mtctr	r4
1:	icbi	0,r3
	add	r3,r3,r5
	bdnz	1b
	isync
	blr
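
/*
 * Typical caller (sketch; cf. flush_dcache_icache_page() in mm/mem.c):
 *
 *	__flush_dcache_icache(page_address(page));
 */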

_GLOBAL(__bswapdi2)
EXPORT_SYMBOL(__bswapdi2)
	srdi	r8,r3,32
	rlwinm	r7,r3,8,0xffffffff
	rlwimi	r7,r3,24,0,7
	rlwinm	r9,r8,8,0xffffffff
	rlwimi	r7,r3,24,16,23
	rlwimi	r9,r8,24,0,7
	rlwimi	r9,r8,24,16,23
	sldi	r7,r7,32
	or	r3,r7,r9
	blr
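
/*
 * Worked example: each 32-bit half is byte-reversed with
 * rlwinm/rlwimi, then the halves are swapped into place, so an
 * input of r3 = 0x0011223344556677 returns r3 = 0x7766554433221100.
 */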


#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
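/*
 * rmci_on/rmci_off: set/clear the HID4 bit that makes real-mode
 * data accesses cache-inhibited on the 970 (used only for early
 * BootX debug output).
 */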
_GLOBAL(rmci_on)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0
	mfspr	r5,SPRN_HID4
	or	r5,r5,r3
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr

_GLOBAL(rmci_off)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0
	mfspr	r5,SPRN_HID4
	andc	r5,r5,r3
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */

#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	lbz	r3,0(r3)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	stb	r3,0(r4)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */

#ifdef CONFIG_PPC_PASEMI

_GLOBAL(real_205_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	LBZCIX(R3,R0,R3)
	isync
	mtmsrd	r7
	sync
	isync
	blr

_GLOBAL(real_205_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	STBCIX(R3,R0,R4)
	isync
	mtmsrd	r7
	sync
	isync
	blr

#endif /* CONFIG_PPC_PASEMI */


#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
/*
 * SCOM access functions for 970 (FX only for now)
 *
 * unsigned long scom970_read(unsigned int address);
 * void scom970_write(unsigned int address, unsigned long value);
 *
 * The address passed in is the 24-bit register address.  This code
 * is 970 specific and will not check the status bits, so you should
 * know what you are doing.
 */
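
/*
 * Hypothetical read-modify-write use (sketch only; SCOM_ADDR and
 * NEW_BITS are placeholders for a real 24-bit SCOM register address
 * and field value):
 *
 *	unsigned long v = scom970_read(SCOM_ADDR);
 *	scom970_write(SCOM_ADDR, v | NEW_BITS);
 */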
_GLOBAL(scom970_read)
	/* interrupts off */
	mfmsr	r4
	ori	r0,r4,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate the 24-bit SCOM address 8 bits left and mask out its
	 * low 8 bits (including parity). On current CPUs they must be
	 * 0'd. Finally OR in the read/write bit.
	 */
	rlwinm	r3,r3,8,0,15
	ori	r3,r3,0x8000

	/* do the actual scom read */
	sync
	mtspr	SPRN_SCOMC,r3
	isync
	mfspr	r3,SPRN_SCOMD
	isync
	mfspr	r0,SPRN_SCOMC
	isync

	/* XXX:	fixup result on some buggy 970's (ouch! we lost a bit, bah,
	 * that's the best we can do). Not implemented yet as we don't use
	 * the scom on any of the bogus CPUs yet, but may have to be done
	 * ultimately
	 */

	/* restore interrupts */
	mtmsrd	r4,1
	blr


_GLOBAL(scom970_write)
	/* interrupts off */
	mfmsr	r5
	ori	r0,r5,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate the 24-bit SCOM address 8 bits left and mask out its
	 * low 8 bits (including parity). On current CPUs they must be
	 * 0'd.
	 */

	rlwinm	r3,r3,8,0,15

	sync
	mtspr	SPRN_SCOMD,r4      /* write data */
	isync
	mtspr	SPRN_SCOMC,r3      /* write command */
	isync
	mfspr	r3,SPRN_SCOMC
	isync

	/* restore interrupts */
	mtmsrd	r5,1
	blr
#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */

/* kexec_wait(phys_cpu)
 *
 * wait for the flag to change, indicating this kernel is going away but
 * the slave code for the next one is at addresses 0 to 0x100.
 *
 * This is used by all slaves, even those that did not find a matching
 * paca in the secondary startup code.
 *
 * Physical (hardware) cpu id should be in r3.
 */
_GLOBAL(kexec_wait)
	bl	1f
1:	mflr	r5
	addi	r5,r5,kexec_flag-1b

99:	HMT_LOW
#ifdef CONFIG_KEXEC_CORE	/* use no memory without kexec */
	lwz	r4,0(r5)
	cmpwi	0,r4,0
	beq	99b
#ifdef CONFIG_PPC_BOOK3S_64
	li	r10,0x60
	mfmsr	r11
	clrrdi	r11,r11,1	/* Clear MSR_LE */
	mtsrr0	r10
	mtsrr1	r11
	rfid
#else
	/* Create TLB entry in book3e_secondary_core_init */
	li	r4,0
	ba	0x60
#endif
#endif

/* this can be in text because we won't change it until we are
 * running in real mode anyway
 */
kexec_flag:
	.long	0
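
/*
 * Roughly equivalent C for the wait loop above (sketch only;
 * jump_to() is a placeholder for the rfid/ba transfer to 0x60):
 *
 *	while (READ_ONCE(kexec_flag) == 0)
 *		cpu_relax();		// HMT_LOW: drop SMT priority
 *	jump_to(0x60, phys_cpu);	// slave entry of the new kernel
 */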


#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_PPC_BOOK3E
/*
 * BOOK3E has no real MMU mode, so we have to set up the initial TLB
 * for a core to identity map v:0 to p:0.  This current implementation
 * assumes that 1G is enough for kexec.
 */
kexec_create_tlb:
	/*
	 * Invalidate all non-IPROT TLB entries to avoid any TLB conflict.
	 * IPROT TLB entries should be >= PAGE_OFFSET and thus not conflict.
	 */
	PPC_TLBILX_ALL(0,R0)
	sync
	isync

	mfspr	r10,SPRN_TLB1CFG
	andi.	r10,r10,TLBnCFG_N_ENTRY	/* Extract # entries */
	subi	r10,r10,1	/* Last entry: no conflict with kernel text */
	lis	r9,MAS0_TLBSEL(1)@h
	rlwimi	r9,r10,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r9) */

/* Set up a temp identity mapping v:0 to p:0 and return to it. */
#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
#define M_IF_NEEDED	MAS2_M
#else
#define M_IF_NEEDED	0
#endif
	mtspr	SPRN_MAS0,r9

	lis	r9,(MAS1_VALID|MAS1_IPROT)@h
	ori	r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
	mtspr	SPRN_MAS1,r9

	LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_NEEDED)
	mtspr	SPRN_MAS2,r9

	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
	mtspr	SPRN_MAS3,r9
	li	r9,0
	mtspr	SPRN_MAS7,r9

	tlbwe
	isync
	blr
#endif

/* kexec_smp_wait(void)
 *
 * call with interrupts off
 * note: this is a terminal routine, it does not save lr
 *
 * get phys id from paca
 * switch to real mode
 * mark the paca as no longer used
 * join other cpus in kexec_wait(phys_id)
 */
_GLOBAL(kexec_smp_wait)
	lhz	r3,PACAHWCPUID(r13)
	bl	real_mode

	li	r4,KEXEC_STATE_REAL_MODE
	stb	r4,PACAKEXECSTATE(r13)
	SYNC

	b	kexec_wait

/*
 * switch to real mode (turn mmu off)
 * we use the early kernel trick that the hardware ignores bits
 * 0 and 1 (big endian) of the effective address in real mode
 *
 * don't overwrite r3 here, it is live for kexec_wait above.
 */
real_mode:	/* assume normal blr return */
#ifdef CONFIG_PPC_BOOK3E
	/* Create an identity mapping. */
	b	kexec_create_tlb
#else
1:	li	r9,MSR_RI
	li	r10,MSR_DR|MSR_IR
	mflr	r11		/* return address to SRR0 */
	mfmsr	r12
	andc	r9,r12,r9
	andc	r10,r12,r10

	mtmsrd	r9,1
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	rfid
#endif
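
/*
 * In rough C-like pseudocode, the hash-MMU path above does:
 *
 *	msr = mfmsr();
 *	mtmsrd(msr & ~MSR_RI, 1);		// exceptions unrecoverable
 *	srr1 = msr & ~(MSR_DR | MSR_IR);	// target state: MMU off
 *	srr0 = return_address;			// taken from lr
 *	rfid();					// 'return' with MMU off
 */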

/*
 * kexec_sequence(newstack, start, image, control, clear_all(),
 *		  copy_with_mmu_off)
 *
 * does the grungy work with stack switching and real mode switches
 * also does simple calls to other code
 */
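
/*
 * C prototype (roughly as declared in asm/kexec.h):
 *
 *	void kexec_sequence(void *newstack, unsigned long start,
 *			    void *image, void *control,
 *			    void (*clear_all)(void),
 *			    bool copy_with_mmu_off);
 */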

_GLOBAL(kexec_sequence)
	mflr	r0
	std	r0,16(r1)

	/* switch stacks to newstack -- &kexec_stack.stack */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3

	li	r0,0
	std	r0,16(r1)

BEGIN_FTR_SECTION
	/*
	 * This is the best time to turn AMR/IAMR off.
	 * key 0 is used in radix for supervisor<->user
	 * protection, but on hash key 0 is reserved;
	 * ideally we want to enter with a clean state.
	 * NOTE, we rely on r0 being 0 from above.
	 */
	mtspr	SPRN_IAMR,r0
	mtspr	SPRN_AMOR,r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/* save regs for local vars on new stack.
	 * yes, we won't go back, but ...
	 */
	std	r31,-8(r1)
	std	r30,-16(r1)
	std	r29,-24(r1)
	std	r28,-32(r1)
	std	r27,-40(r1)
	std	r26,-48(r1)
	std	r25,-56(r1)

	stdu	r1,-STACK_FRAME_OVERHEAD-64(r1)

	/* save args into preserved regs */
	mr	r31,r3			/* newstack (both) */
	mr	r30,r4			/* start (real) */
	mr	r29,r5			/* image (virt) */
	mr	r28,r6			/* control, unused */
	mr	r27,r7			/* clear_all() fn desc */
	mr	r26,r8			/* copy_with_mmu_off */
	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */

	/* disable interrupts, we are overwriting kernel data next */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r3
	rlwinm	r3,r3,0,17,15
	mtmsrd	r3,1
#endif

	/* We need to turn the MMU off unless we are in hash mode
	 * under a hypervisor
	 */
	cmpdi	r26,0
	beq	1f
	bl	real_mode
1:
	/* copy dest pages, flush whole dest image */
	mr	r3,r29
	bl	kexec_copy_flush	/* (image) */

	/* turn off mmu now if not done earlier */
	cmpdi	r26,0
	bne	1f
	bl	real_mode

	/* copy 0x100 bytes starting at start to 0 */
1:	li	r3,0
	mr	r4,r30		/* start, aka phys mem offset */
	li	r5,0x100
	li	r6,0
	bl	copy_and_flush	/* (dest, src, copy limit, start offset) */
1:	/* assume normal blr return */

	/* release other cpus to the new kernel secondary start at 0x60 */
	mflr	r5
	li	r6,1
	stw	r6,kexec_flag-1b(r5)

	cmpdi	r27,0
	beq	1f

	/* clear out hardware hash page table and tlb */
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r27)		/* deref function descriptor */
#else
	mr	r12,r27
#endif
	mtctr	r12
	bctrl				/* mmu_hash_ops.hpte_clear_all(void); */

/*
 *   kexec image calling is:
 *      the first 0x100 bytes of the entry point are copied to 0
 *
 *      all slaves branch to slave = 0x60 (absolute)
 *              slave(phys_cpu_id);
 *
 *      master goes to start = entry point
 *              start(phys_cpu_id, start, 0);
 *
 *
 *   a wrapper is needed to call existing kernels, here is an approximate
 *   description of one method:
 *
 * v2: (2.6.10)
 *   start will be near the boot_block (maybe 0x100 bytes before it?)
 *   it will have a 0x60, which will b to boot_block, where it will wait
 *   and 0 will store phys into struct boot-block and load r3 from there,
 *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
 *
 * v1: (2.6.9)
 *    boot block will have all cpus scanning device tree to see if they
 *    are the boot cpu ?????
 *    other device tree differences (prop sizes, va vs pa, etc)...
 */
1:	mr	r3,r25	# my phys cpu
	mr	r4,r30	# start, aka phys mem offset
	mtlr	r4
	li	r5,0
	blr	/* image->start(physid, image->start, 0); */
#endif /* CONFIG_KEXEC_CORE */