xref: /openbmc/linux/arch/x86/include/asm/xen/hypercall.h (revision f87deada)
/******************************************************************************
 * hypercall.h
 *
 * Linux-specific hypervisor handling.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef _ASM_X86_XEN_HYPERCALL_H
#define _ASM_X86_XEN_HYPERCALL_H

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#include <trace/events/xen.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/smap.h>
#include <asm/nospec-branch.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/physdev.h>
#include <xen/interface/platform.h>
#include <xen/interface/xen-mca.h>

struct xen_dm_op_buf;

/*
 * The hypercall asms have to meet several constraints:
 * - Work on 32- and 64-bit.
 *    The two architectures put their arguments in different sets of
 *    registers.
 *
 * - Work around asm syntax quirks
 *    It isn't possible to specify one of the rNN registers in a
 *    constraint, so we use explicit register variables to get the
 *    args into the right place.
 *
 * - Mark all registers as potentially clobbered
 *    Even unused parameters can be clobbered by the hypervisor, so we
 *    need to make sure gcc knows it.
 *
 * - Avoid compiler bugs.
 *    This is the tricky part.  Because x86_32 has such a constrained
 *    register set, gcc versions below 4.3 have trouble generating
 *    code when all the arg registers and memory are trashed by the
 *    asm.  There are syntactically simpler ways of achieving the
 *    semantics below, but they cause the compiler to crash.
 *
 *    The only combination I found which works is:
 *     - assign the __argX variables first
 *     - list all actually used parameters as "+r" (__argX)
 *     - clobber the rest
 *
 * The result certainly isn't pretty, and it really shows up cpp's
 * weakness as a macro language.  Sorry.  (But let's just give thanks
 * there aren't more than 5 arguments...)
 */

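/*
 * The hypercall page is populated by the hypervisor; each hypercall
 * gets its own 32-byte entry, so hypercall NR is reached with a direct
 * call to hypercall_page + NR * 32.
 */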
extern struct { char _entry[32]; } hypercall_page[];

#define __HYPERCALL		"call hypercall_page+%c[offset]"
#define __HYPERCALL_ENTRY(x)						\
	[offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))

#ifdef CONFIG_X86_32
#define __HYPERCALL_RETREG	"eax"
#define __HYPERCALL_ARG1REG	"ebx"
#define __HYPERCALL_ARG2REG	"ecx"
#define __HYPERCALL_ARG3REG	"edx"
#define __HYPERCALL_ARG4REG	"esi"
#define __HYPERCALL_ARG5REG	"edi"
#else
#define __HYPERCALL_RETREG	"rax"
#define __HYPERCALL_ARG1REG	"rdi"
#define __HYPERCALL_ARG2REG	"rsi"
#define __HYPERCALL_ARG3REG	"rdx"
#define __HYPERCALL_ARG4REG	"r10"
#define __HYPERCALL_ARG5REG	"r8"
#endif

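/*
 * The self-initialisation (__argX = __argX) looks odd: it keeps each
 * variable tied to its fixed argument register while, as far as we can
 * tell, avoiding "used uninitialised" warnings for the registers a
 * given hypercall never assigns.
 */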
#define __HYPERCALL_DECLS						\
	register unsigned long __res  asm(__HYPERCALL_RETREG);		\
	register unsigned long __arg1 asm(__HYPERCALL_ARG1REG) = __arg1; \
	register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
	register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
	register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
	register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5;

#define __HYPERCALL_0PARAM	"=r" (__res), ASM_CALL_CONSTRAINT
#define __HYPERCALL_1PARAM	__HYPERCALL_0PARAM, "+r" (__arg1)
#define __HYPERCALL_2PARAM	__HYPERCALL_1PARAM, "+r" (__arg2)
#define __HYPERCALL_3PARAM	__HYPERCALL_2PARAM, "+r" (__arg3)
#define __HYPERCALL_4PARAM	__HYPERCALL_3PARAM, "+r" (__arg4)
#define __HYPERCALL_5PARAM	__HYPERCALL_4PARAM, "+r" (__arg5)

#define __HYPERCALL_0ARG()
#define __HYPERCALL_1ARG(a1)						\
	__HYPERCALL_0ARG()		__arg1 = (unsigned long)(a1);
#define __HYPERCALL_2ARG(a1,a2)						\
	__HYPERCALL_1ARG(a1)		__arg2 = (unsigned long)(a2);
#define __HYPERCALL_3ARG(a1,a2,a3)					\
	__HYPERCALL_2ARG(a1,a2)		__arg3 = (unsigned long)(a3);
#define __HYPERCALL_4ARG(a1,a2,a3,a4)					\
	__HYPERCALL_3ARG(a1,a2,a3)	__arg4 = (unsigned long)(a4);
#define __HYPERCALL_5ARG(a1,a2,a3,a4,a5)				\
	__HYPERCALL_4ARG(a1,a2,a3,a4)	__arg5 = (unsigned long)(a5);

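/*
 * A hypercall with N arguments lists registers 1..N as "+r" operands
 * and declares the remaining argument registers, plus memory, as
 * clobbered: the hypervisor may modify any of them.
 */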
#define __HYPERCALL_CLOBBER5	"memory"
#define __HYPERCALL_CLOBBER4	__HYPERCALL_CLOBBER5, __HYPERCALL_ARG5REG
#define __HYPERCALL_CLOBBER3	__HYPERCALL_CLOBBER4, __HYPERCALL_ARG4REG
#define __HYPERCALL_CLOBBER2	__HYPERCALL_CLOBBER3, __HYPERCALL_ARG3REG
#define __HYPERCALL_CLOBBER1	__HYPERCALL_CLOBBER2, __HYPERCALL_ARG2REG
#define __HYPERCALL_CLOBBER0	__HYPERCALL_CLOBBER1, __HYPERCALL_ARG1REG

#define _hypercall0(type, name)						\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_0ARG();						\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_0PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER0);				\
	(type)__res;							\
})

#define _hypercall1(type, name, a1)					\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_1ARG(a1);						\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_1PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER1);				\
	(type)__res;							\
})

#define _hypercall2(type, name, a1, a2)					\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_2ARG(a1, a2);					\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_2PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER2);				\
	(type)__res;							\
})

#define _hypercall3(type, name, a1, a2, a3)				\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_3ARG(a1, a2, a3);					\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_3PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER3);				\
	(type)__res;							\
})

#define _hypercall4(type, name, a1, a2, a3, a4)				\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_4ARG(a1, a2, a3, a4);				\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_4PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER4);				\
	(type)__res;							\
})

#define _hypercall5(type, name, a1, a2, a3, a4, a5)			\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);				\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_5PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER5);				\
	(type)__res;							\
})

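/*
 * Illustration only (64-bit case, not generated code): something like
 * _hypercall2(int, sched_op, cmd, arg) loads cmd/arg into rdi/rsi and
 * expands to roughly
 *
 *	asm volatile("call hypercall_page+%c[offset]"
 *		     : "=r" (__res), "+r" (__arg1), "+r" (__arg2)
 *		     : [offset] "i" (__HYPERVISOR_sched_op * 32)
 *		     : "memory", "r8", "r10", "rdx");
 *
 * with the result returned in rax.
 */

/*
 * privcmd_call() issues an arbitrary hypercall on behalf of the
 * privcmd driver.  It uses an indirect, retpoline-safe CALL_NOSPEC
 * into the hypercall page, and temporarily disables SMAP (stac/clac)
 * because the argument buffers handed in through the privcmd ioctl
 * may be user-space addresses.
 */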
static inline long
privcmd_call(unsigned call,
	     unsigned long a1, unsigned long a2,
	     unsigned long a3, unsigned long a4,
	     unsigned long a5)
{
	__HYPERCALL_DECLS;
	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);

	stac();
	asm volatile(CALL_NOSPEC
		     : __HYPERCALL_5PARAM
		     : [thunk_target] "a" (&hypercall_page[call])
		     : __HYPERCALL_CLOBBER5);
	clac();

	return (long)__res;
}

static inline int
HYPERVISOR_set_trap_table(struct trap_info *table)
{
	return _hypercall1(int, set_trap_table, table);
}

static inline int
HYPERVISOR_mmu_update(struct mmu_update *req, int count,
		      int *success_count, domid_t domid)
{
	return _hypercall4(int, mmu_update, req, count, success_count, domid);
}

static inline int
HYPERVISOR_mmuext_op(struct mmuext_op *op, int count,
		     int *success_count, domid_t domid)
{
	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
}

static inline int
HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
{
	return _hypercall2(int, set_gdt, frame_list, entries);
}

static inline int
HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
{
	return _hypercall2(int, stack_switch, ss, esp);
}

#ifdef CONFIG_X86_32
static inline int
HYPERVISOR_set_callbacks(unsigned long event_selector,
			 unsigned long event_address,
			 unsigned long failsafe_selector,
			 unsigned long failsafe_address)
{
	return _hypercall4(int, set_callbacks,
			   event_selector, event_address,
			   failsafe_selector, failsafe_address);
}
#else  /* CONFIG_X86_64 */
static inline int
HYPERVISOR_set_callbacks(unsigned long event_address,
			 unsigned long failsafe_address,
			 unsigned long syscall_address)
{
	return _hypercall3(int, set_callbacks,
			   event_address, failsafe_address,
			   syscall_address);
}
#endif  /* CONFIG_X86_{32,64} */

static inline int
HYPERVISOR_callback_op(int cmd, void *arg)
{
	return _hypercall2(int, callback_op, cmd, arg);
}

static inline int
HYPERVISOR_fpu_taskswitch(int set)
{
	return _hypercall1(int, fpu_taskswitch, set);
}

static inline int
HYPERVISOR_sched_op(int cmd, void *arg)
{
	return _hypercall2(int, sched_op, cmd, arg);
}

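/*
 * The 64-bit timeout is split into two halves so the same call works
 * on 32-bit; on 64-bit the "low" half already carries the whole value
 * and the high half is ignored by the hypervisor.
 */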
static inline long
HYPERVISOR_set_timer_op(u64 timeout)
{
	unsigned long timeout_hi = (unsigned long)(timeout>>32);
	unsigned long timeout_lo = (unsigned long)timeout;
	return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
}

static inline int
HYPERVISOR_mca(struct xen_mc *mc_op)
{
	mc_op->interface_version = XEN_MCA_INTERFACE_VERSION;
	return _hypercall1(int, mca, mc_op);
}

static inline int
HYPERVISOR_platform_op(struct xen_platform_op *op)
{
	op->interface_version = XENPF_INTERFACE_VERSION;
	return _hypercall1(int, platform_op, op);
}

static inline int
HYPERVISOR_set_debugreg(int reg, unsigned long value)
{
	return _hypercall2(int, set_debugreg, reg, value);
}

static inline unsigned long
HYPERVISOR_get_debugreg(int reg)
{
	return _hypercall1(unsigned long, get_debugreg, reg);
}

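/*
 * 64-bit hypercall arguments (machine addresses, ptes) do not fit in
 * one register on 32-bit, so they are passed there as separate
 * low/high words; hence the sizeof(long) checks here and below.
 */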
static inline int
HYPERVISOR_update_descriptor(u64 ma, u64 desc)
{
	if (sizeof(u64) == sizeof(long))
		return _hypercall2(int, update_descriptor, ma, desc);
	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
}

static inline long
HYPERVISOR_memory_op(unsigned int cmd, void *arg)
{
	return _hypercall2(long, memory_op, cmd, arg);
}

static inline int
HYPERVISOR_multicall(void *call_list, uint32_t nr_calls)
{
	return _hypercall2(int, multicall, call_list, nr_calls);
}

static inline int
HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
			     unsigned long flags)
{
	if (sizeof(new_val) == sizeof(long))
		return _hypercall3(int, update_va_mapping, va,
				   new_val.pte, flags);
	else
		return _hypercall4(int, update_va_mapping, va,
				   new_val.pte, new_val.pte >> 32, flags);
}
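
/*
 * event_channel_op and physdev_op used to be multiplexed through a
 * single-argument compat hypercall.  A hypervisor that predates the
 * two-argument form returns -ENOSYS, in which case we retry through
 * the compat wrappers declared below.
 */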
extern int __must_check xen_event_channel_op_compat(int, void *);

static inline int
HYPERVISOR_event_channel_op(int cmd, void *arg)
{
	int rc = _hypercall2(int, event_channel_op, cmd, arg);
	if (unlikely(rc == -ENOSYS))
		rc = xen_event_channel_op_compat(cmd, arg);
	return rc;
}

static inline int
HYPERVISOR_xen_version(int cmd, void *arg)
{
	return _hypercall2(int, xen_version, cmd, arg);
}

static inline int
HYPERVISOR_console_io(int cmd, int count, char *str)
{
	return _hypercall3(int, console_io, cmd, count, str);
}

extern int __must_check xen_physdev_op_compat(int, void *);

static inline int
HYPERVISOR_physdev_op(int cmd, void *arg)
{
	int rc = _hypercall2(int, physdev_op, cmd, arg);
	if (unlikely(rc == -ENOSYS))
		rc = xen_physdev_op_compat(cmd, arg);
	return rc;
}

static inline int
HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
{
	return _hypercall3(int, grant_table_op, cmd, uop, count);
}

static inline int
HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val,
					 unsigned long flags, domid_t domid)
{
	if (sizeof(new_val) == sizeof(long))
		return _hypercall4(int, update_va_mapping_otherdomain, va,
				   new_val.pte, flags, domid);
	else
		return _hypercall5(int, update_va_mapping_otherdomain, va,
				   new_val.pte, new_val.pte >> 32,
				   flags, domid);
}

static inline int
HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
{
	return _hypercall2(int, vm_assist, cmd, type);
}

static inline int
HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
{
	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
}

#ifdef CONFIG_X86_64
static inline int
HYPERVISOR_set_segment_base(int reg, unsigned long value)
{
	return _hypercall2(int, set_segment_base, reg, value);
}
#endif

static inline int
HYPERVISOR_suspend(unsigned long start_info_mfn)
{
	struct sched_shutdown r = { .reason = SHUTDOWN_suspend };

	/*
	 * For a PV guest the tools require that the start_info mfn be
	 * present in rdx/edx when the hypercall is made. Per the
	 * hypercall calling convention this is the third hypercall
	 * argument, which is start_info_mfn here.
	 */
	return _hypercall3(int, sched_op, SCHEDOP_shutdown, &r, start_info_mfn);
}

static inline int
HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
{
	return _hypercall2(int, nmi_op, op, arg);
}

static inline unsigned long __must_check
HYPERVISOR_hvm_op(int op, void *arg)
{
	return _hypercall2(unsigned long, hvm_op, op, arg);
}

static inline int
HYPERVISOR_tmem_op(
	struct tmem_op *op)
{
	return _hypercall1(int, tmem_op, op);
}

static inline int
HYPERVISOR_xenpmu_op(unsigned int op, void *arg)
{
	return _hypercall2(int, xenpmu_op, op, arg);
}

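/*
 * As with privcmd_call(), the dm_op buffers come from user space (the
 * device model), so SMAP is disabled around the hypercall.
 */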
static inline int
HYPERVISOR_dm_op(
	domid_t dom, unsigned int nr_bufs, struct xen_dm_op_buf *bufs)
{
	int ret;
	stac();
	ret = _hypercall3(int, dm_op, dom, nr_bufs, bufs);
	clac();
	return ret;
}

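/*
 * The MULTI_* helpers below each fill in one struct multicall_entry
 * for batched submission via HYPERVISOR_multicall();
 * trace_xen_mc_entry() records the entry and its argument count for
 * the xen tracepoints.
 */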
static inline void
MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
{
	mcl->op = __HYPERVISOR_fpu_taskswitch;
	mcl->args[0] = set;

	trace_xen_mc_entry(mcl, 1);
}

static inline void
MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
			pte_t new_val, unsigned long flags)
{
	mcl->op = __HYPERVISOR_update_va_mapping;
	mcl->args[0] = va;
	if (sizeof(new_val) == sizeof(long)) {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = flags;
	} else {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = new_val.pte >> 32;
		mcl->args[3] = flags;
	}

	trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 3 : 4);
}

static inline void
MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
		     void *uop, unsigned int count)
{
	mcl->op = __HYPERVISOR_grant_table_op;
	mcl->args[0] = cmd;
	mcl->args[1] = (unsigned long)uop;
	mcl->args[2] = count;

	trace_xen_mc_entry(mcl, 3);
}

static inline void
MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long va,
				    pte_t new_val, unsigned long flags,
				    domid_t domid)
{
	mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
	mcl->args[0] = va;
	if (sizeof(new_val) == sizeof(long)) {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = flags;
		mcl->args[3] = domid;
	} else {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = new_val.pte >> 32;
		mcl->args[3] = flags;
		mcl->args[4] = domid;
	}

	trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 4 : 5);
}

static inline void
MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
			struct desc_struct desc)
{
	mcl->op = __HYPERVISOR_update_descriptor;
	if (sizeof(maddr) == sizeof(long)) {
		mcl->args[0] = maddr;
		mcl->args[1] = *(unsigned long *)&desc;
	} else {
		u32 *p = (u32 *)&desc;

		mcl->args[0] = maddr;
		mcl->args[1] = maddr >> 32;
		mcl->args[2] = *p++;
		mcl->args[3] = *p;
	}

	trace_xen_mc_entry(mcl, sizeof(maddr) == sizeof(long) ? 2 : 4);
}

static inline void
MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg)
{
	mcl->op = __HYPERVISOR_memory_op;
	mcl->args[0] = cmd;
	mcl->args[1] = (unsigned long)arg;

	trace_xen_mc_entry(mcl, 2);
}

static inline void
MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
		 int count, int *success_count, domid_t domid)
{
	mcl->op = __HYPERVISOR_mmu_update;
	mcl->args[0] = (unsigned long)req;
	mcl->args[1] = count;
	mcl->args[2] = (unsigned long)success_count;
	mcl->args[3] = domid;

	trace_xen_mc_entry(mcl, 4);
}

static inline void
MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
		int *success_count, domid_t domid)
{
	mcl->op = __HYPERVISOR_mmuext_op;
	mcl->args[0] = (unsigned long)op;
	mcl->args[1] = count;
	mcl->args[2] = (unsigned long)success_count;
	mcl->args[3] = domid;

	trace_xen_mc_entry(mcl, 4);
}

static inline void
MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries)
{
	mcl->op = __HYPERVISOR_set_gdt;
	mcl->args[0] = (unsigned long)frames;
	mcl->args[1] = entries;

	trace_xen_mc_entry(mcl, 2);
}

static inline void
MULTI_stack_switch(struct multicall_entry *mcl,
		   unsigned long ss, unsigned long esp)
{
	mcl->op = __HYPERVISOR_stack_switch;
	mcl->args[0] = ss;
	mcl->args[1] = esp;

	trace_xen_mc_entry(mcl, 2);
}

#endif /* _ASM_X86_XEN_HYPERCALL_H */