xref: /openbmc/linux/arch/powerpc/platforms/pseries/hvCall.S (revision df2634f43f5106947f3735a0b61a6527a4b278cd)
/*
 * This file contains the generic code to perform a call to the
 * pSeries LPAR hypervisor.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/hvcall.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

/*
 * Offset within the caller's stack frame of the parameter save area
 * slot corresponding to GPR "i" (i in r3..r10): the area begins at
 * 48(r1) on ppc64 (ELFv1) and holds one 8-byte slot per argument
 * register.  Those slots belong to the callee, so the tracing code
 * below is free to use them to snapshot the argument registers.
 */
#define STK_PARM(i)     (48 + ((i)-3)*8)

#ifdef CONFIG_TRACEPOINTS

	.section	".toc","aw"

	/*
	 * Count of active hcall tracepoint consumers.  The PRECALL /
	 * POSTCALL sequences below branch straight past the tracing
	 * when this is zero.
	 */
	.globl hcall_tracepoint_refcount
hcall_tracepoint_refcount:
	.llong	0

	.section	".text"

/*
 * precall must preserve all registers.  use unused STK_PARM()
 * areas to save snapshots and opcode. We branch around this
 * in early init (eg when populating the MMU hashtable) by using an
 * unconditional cpu feature.
 *
 * FIRST_REG is the first register holding a real hcall argument
 * (r4 for the no-return-buffer variant, r5 when r4 is the return
 * buffer pointer); its stack snapshot slot is passed to
 * __trace_hcall_entry as the argument array.
 */
#define HCALL_INST_PRECALL(FIRST_REG)				\
BEGIN_FTR_SECTION;						\
	b	1f;						\
END_FTR_SECTION(0, 1);						\
	ld      r12,hcall_tracepoint_refcount@toc(r2);		\
	cmpdi	r12,0;						\
	beq+	1f;		/* no tracer active: skip */	\
	mflr	r0;						\
	std	r3,STK_PARM(r3)(r1);				\
	std	r4,STK_PARM(r4)(r1);				\
	std	r5,STK_PARM(r5)(r1);				\
	std	r6,STK_PARM(r6)(r1);				\
	std	r7,STK_PARM(r7)(r1);				\
	std	r8,STK_PARM(r8)(r1);				\
	std	r9,STK_PARM(r9)(r1);				\
	std	r10,STK_PARM(r10)(r1);				\
	std	r0,16(r1);	/* save LR in its ABI slot */	\
	addi	r4,r1,STK_PARM(FIRST_REG);			\
	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
	bl	.__trace_hcall_entry;				\
	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
	ld	r0,16(r1);					\
	ld	r3,STK_PARM(r3)(r1);				\
	ld	r4,STK_PARM(r4)(r1);				\
	ld	r5,STK_PARM(r5)(r1);				\
	ld	r6,STK_PARM(r6)(r1);				\
	ld	r7,STK_PARM(r7)(r1);				\
	ld	r8,STK_PARM(r8)(r1);				\
	ld	r9,STK_PARM(r9)(r1);				\
	ld	r10,STK_PARM(r10)(r1);				\
	mtlr	r0;						\
1:

/*
 * postcall is performed immediately before function return which
 * allows liberal use of volatile registers.  We branch around this
 * in early init (eg when populating the MMU hashtable) by using an
 * unconditional cpu feature.
 *
 * On entry r3 holds the hcall status and r5 the return buffer
 * pointer (or 0); __trace_hcall_exit is called as
 * (opcode, retval, retbuf) using the opcode snapshot taken by
 * HCALL_INST_PRECALL.
 */
#define __HCALL_INST_POSTCALL					\
BEGIN_FTR_SECTION;						\
	b	1f;						\
END_FTR_SECTION(0, 1);						\
	ld      r12,hcall_tracepoint_refcount@toc(r2);		\
	cmpdi	r12,0;						\
	beq+	1f;		/* no tracer active: skip */	\
	mflr	r0;						\
	ld	r6,STK_PARM(r3)(r1);	/* r6 = saved opcode */	\
	std	r3,STK_PARM(r3)(r1);	/* stash status */	\
	mr	r4,r3;			/* arg1 = retval */	\
	mr	r3,r6;			/* arg0 = opcode */	\
	std	r0,16(r1);					\
	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
	bl	.__trace_hcall_exit;				\
	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
	ld	r0,16(r1);					\
	ld	r3,STK_PARM(r3)(r1);	/* restore status */	\
	mtlr	r0;						\
1:

/* Variant with no return buffer: trace with retbuf == NULL. */
#define HCALL_INST_POSTCALL_NORETS				\
	li	r5,0;						\
	__HCALL_INST_POSTCALL

/* Variant with a return buffer in register BUFREG. */
#define HCALL_INST_POSTCALL(BUFREG)				\
	mr	r5,BUFREG;					\
	__HCALL_INST_POSTCALL

#else
#define HCALL_INST_PRECALL(FIRST_ARG)
#define HCALL_INST_POSTCALL_NORETS
#define HCALL_INST_POSTCALL(BUFREG)
#endif
107
	.text

/*
 * long plpar_hcall_norets(unsigned long opcode, ...);
 *
 * Hypervisor call taking up to 7 arguments (r4-r10) and returning
 * only a status code in r3; no return buffer is filled in.  CR is
 * saved around the HVSC and restored afterwards.
 */
_GLOBAL(plpar_hcall_norets)
	HMT_MEDIUM			/* normal SMT thread priority */

	mfcr	r0
	stw	r0,8(r1)		/* save CR in its ABI frame slot */

	HCALL_INST_PRECALL(r4)		/* trace entry; args start at r4 */

	HVSC				/* invoke the hypervisor */

	HCALL_INST_POSTCALL_NORETS	/* trace exit; no return buffer */

	lwz	r0,8(r1)
	mtcrf	0xff,r0			/* restore CR */
	blr				/* return r3 = status */
125
/*
 * long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
 *
 * Hypervisor call taking up to 6 arguments (r5-r10 on entry, shifted
 * down to r4-r9 for the HVSC) and storing 4 return words into the
 * buffer pointed to by r4.  Returns the status code in r3.
 */
_GLOBAL(plpar_hcall)
	HMT_MEDIUM			/* normal SMT thread priority */

	mfcr	r0
	stw	r0,8(r1)		/* save CR in its ABI frame slot */

	HCALL_INST_PRECALL(r5)		/* trace entry; args start at r5 */

	std     r4,STK_PARM(r4)(r1)     /* Save ret buffer */

	/* Shift the C arguments down one register so the hypervisor
	 * sees them in r4-r9 (each mr reads a not-yet-overwritten reg). */
	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

	ld	r12,STK_PARM(r4)(r1)	/* reload ret buffer pointer */
	std	r4,  0(r12)		/* store the 4 return words */
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)

	HCALL_INST_POSTCALL(r12)	/* trace exit with ret buffer */

	lwz	r0,8(r1)
	mtcrf	0xff,r0			/* restore CR */

	blr				/* return r3 = status */
157
/*
 * plpar_hcall_raw can be called in real mode. kexec/kdump need some
 * hypervisor calls to be executed in real mode. So plpar_hcall_raw
 * does not access the per cpu hypervisor call statistics variables,
 * since these variables may not be present in the RMO region.
 *
 * Same interface as plpar_hcall:
 *   long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
 * but with no tracing, so it is safe in real mode.
 */
_GLOBAL(plpar_hcall_raw)
	HMT_MEDIUM			/* normal SMT thread priority */

	mfcr	r0
	stw	r0,8(r1)		/* save CR in its ABI frame slot */

	std     r4,STK_PARM(r4)(r1)     /* Save ret buffer */

	/* Shift the C arguments down one register into r4-r9. */
	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

	ld	r12,STK_PARM(r4)(r1)	/* reload ret buffer pointer */
	std	r4,  0(r12)		/* store the 4 return words */
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)

	lwz	r0,8(r1)
	mtcrf	0xff,r0			/* restore CR */

	blr				/* return r3 = status */
191
/*
 * long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
 *
 * Hypervisor call taking 9 arguments and storing 9 return words into
 * the buffer pointed to by r4.  The first 6 hcall arguments arrive in
 * r5-r10; arguments 7-9 come from the caller's parameter save area
 * (the stack slots for what would be r11-r13) and are placed in
 * r10-r12 for the HVSC.  Returns the status code in r3.
 */
_GLOBAL(plpar_hcall9)
	HMT_MEDIUM			/* normal SMT thread priority */

	mfcr	r0
	stw	r0,8(r1)		/* save CR in its ABI frame slot */

	HCALL_INST_PRECALL(r5)		/* trace entry; args start at r5 */

	std     r4,STK_PARM(r4)(r1)     /* Save ret buffer */

	/* Shift args down one register; order matters so no value is
	 * overwritten before it is read. */
	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10
	ld	r10,STK_PARM(r11)(r1)	 /* put arg7 in R10 */
	ld	r11,STK_PARM(r12)(r1)	 /* put arg8 in R11 */
	ld	r12,STK_PARM(r13)(r1)    /* put arg9 in R12 */

	HVSC				/* invoke the hypervisor */

	mr	r0,r12			/* stash 9th ret word before r12 is reused */
	ld	r12,STK_PARM(r4)(r1)	/* reload ret buffer pointer */
	std	r4,  0(r12)		/* store the 9 return words */
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)
	std	r8, 32(r12)
	std	r9, 40(r12)
	std	r10,48(r12)
	std	r11,56(r12)
	std	r0, 64(r12)		/* stashed 9th ret word */

	HCALL_INST_POSTCALL(r12)	/* trace exit with ret buffer */

	lwz	r0,8(r1)
	mtcrf	0xff,r0			/* restore CR */

	blr				/* return r3 = status */
232
/* See plpar_hcall_raw to see why this is needed */
/*
 * Same interface as plpar_hcall9:
 *   long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...);
 * but with no tracing, so it is safe in real mode (kexec/kdump).
 */
_GLOBAL(plpar_hcall9_raw)
	HMT_MEDIUM			/* normal SMT thread priority */

	mfcr	r0
	stw	r0,8(r1)		/* save CR in its ABI frame slot */

	std     r4,STK_PARM(r4)(r1)     /* Save ret buffer */

	/* Shift args down one register; order matters so no value is
	 * overwritten before it is read. */
	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10
	ld	r10,STK_PARM(r11)(r1)	 /* put arg7 in R10 */
	ld	r11,STK_PARM(r12)(r1)	 /* put arg8 in R11 */
	ld	r12,STK_PARM(r13)(r1)    /* put arg9 in R12 */

	HVSC				/* invoke the hypervisor */

	mr	r0,r12			/* stash 9th ret word before r12 is reused */
	ld	r12,STK_PARM(r4)(r1)	/* reload ret buffer pointer */
	std	r4,  0(r12)		/* store the 9 return words */
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)
	std	r8, 32(r12)
	std	r9, 40(r12)
	std	r10,48(r12)
	std	r11,56(r12)
	std	r0, 64(r12)		/* stashed 9th ret word */

	lwz	r0,8(r1)
	mtcrf	0xff,r0			/* restore CR */

	blr				/* return r3 = status */
270