/******************************************************************************
 * hypercall.h
 *
 * Linux-specific hypervisor handling.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef _ASM_X86_XEN_HYPERCALL_H
#define _ASM_X86_XEN_HYPERCALL_H

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#include <trace/events/xen.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/physdev.h>
#include <xen/interface/platform.h>
#include <xen/interface/xen-mca.h>

/*
 * The hypercall asms have to meet several constraints:
 * - Work on 32- and 64-bit.
 *    The two architectures put their arguments in different sets of
 *    registers.
 *
 * - Work around asm syntax quirks
 *    It isn't possible to specify one of the rNN registers in a
 *    constraint, so we use explicit register variables to get the
 *    args into the right place.
 *
 * - Mark all registers as potentially clobbered
 *    Even unused parameters can be clobbered by the hypervisor, so we
 *    need to make sure gcc knows it.
 *
 * - Avoid compiler bugs.
 *    This is the tricky part.  Because x86_32 has such a constrained
 *    register set, gcc versions below 4.3 have trouble generating
 *    code when all the arg registers and memory are trashed by the
 *    asm.  There are syntactically simpler ways of achieving the
 *    semantics below, but they cause the compiler to crash.
 *
 *    The only combination I found which works is:
 *     - assign the __argX variables first
 *     - list all actually used parameters as "+r" (__argX)
 *     - clobber the rest
 *
 * The result certainly isn't pretty, and it really shows up cpp's
 * weakness as a macro language.  Sorry.  (But let's just give thanks
 * there aren't more than 5 arguments...)
 */
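
/*
 * For orientation, this is roughly what a two-argument call such as
 * _hypercall2(int, xen_version, cmd, arg) expands to on x86_64 with the
 * macros defined below (a sketch only; the 32-bit build differs just in
 * the register names, and the exact expansion is whatever the macros
 * produce):
 *
 *	({
 *		register unsigned long __res  asm("rax");
 *		register unsigned long __arg1 asm("rdi") = __arg1;
 *		register unsigned long __arg2 asm("rsi") = __arg2;
 *		register unsigned long __arg3 asm("rdx") = __arg3;
 *		register unsigned long __arg4 asm("r10") = __arg4;
 *		register unsigned long __arg5 asm("r8")  = __arg5;
 *		__arg1 = (unsigned long)(cmd);
 *		__arg2 = (unsigned long)(arg);
 *		asm volatile ("call hypercall_page+%c[offset]"
 *			      : "=r" (__res), "+r" (__arg1), "+r" (__arg2)
 *			      : [offset] "i" (__HYPERVISOR_xen_version *
 *					      sizeof(hypercall_page[0]))
 *			      : "memory", "r8", "r10", "rdx");
 *		(int)__res;
 *	})
 *
 * Only the argument registers that are actually used appear as "+r"
 * operands; the unused ones are listed as clobbers, and memory is always
 * clobbered.
 */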

extern struct { char _entry[32]; } hypercall_page[];

#define __HYPERCALL		"call hypercall_page+%c[offset]"
#define __HYPERCALL_ENTRY(x)						\
	[offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))

#ifdef CONFIG_X86_32
#define __HYPERCALL_RETREG	"eax"
#define __HYPERCALL_ARG1REG	"ebx"
#define __HYPERCALL_ARG2REG	"ecx"
#define __HYPERCALL_ARG3REG	"edx"
#define __HYPERCALL_ARG4REG	"esi"
#define __HYPERCALL_ARG5REG	"edi"
#else
#define __HYPERCALL_RETREG	"rax"
#define __HYPERCALL_ARG1REG	"rdi"
#define __HYPERCALL_ARG2REG	"rsi"
#define __HYPERCALL_ARG3REG	"rdx"
#define __HYPERCALL_ARG4REG	"r10"
#define __HYPERCALL_ARG5REG	"r8"
#endif

#define __HYPERCALL_DECLS						\
	register unsigned long __res  asm(__HYPERCALL_RETREG);		\
	register unsigned long __arg1 asm(__HYPERCALL_ARG1REG) = __arg1; \
	register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
	register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
	register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
	register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5;

#define __HYPERCALL_0PARAM	"=r" (__res)
#define __HYPERCALL_1PARAM	__HYPERCALL_0PARAM, "+r" (__arg1)
#define __HYPERCALL_2PARAM	__HYPERCALL_1PARAM, "+r" (__arg2)
#define __HYPERCALL_3PARAM	__HYPERCALL_2PARAM, "+r" (__arg3)
#define __HYPERCALL_4PARAM	__HYPERCALL_3PARAM, "+r" (__arg4)
#define __HYPERCALL_5PARAM	__HYPERCALL_4PARAM, "+r" (__arg5)

#define __HYPERCALL_0ARG()
#define __HYPERCALL_1ARG(a1)						\
	__HYPERCALL_0ARG()		__arg1 = (unsigned long)(a1);
#define __HYPERCALL_2ARG(a1,a2)						\
	__HYPERCALL_1ARG(a1)		__arg2 = (unsigned long)(a2);
#define __HYPERCALL_3ARG(a1,a2,a3)					\
	__HYPERCALL_2ARG(a1,a2)		__arg3 = (unsigned long)(a3);
#define __HYPERCALL_4ARG(a1,a2,a3,a4)					\
	__HYPERCALL_3ARG(a1,a2,a3)	__arg4 = (unsigned long)(a4);
#define __HYPERCALL_5ARG(a1,a2,a3,a4,a5)				\
	__HYPERCALL_4ARG(a1,a2,a3,a4)	__arg5 = (unsigned long)(a5);

#define __HYPERCALL_CLOBBER5	"memory"
#define __HYPERCALL_CLOBBER4	__HYPERCALL_CLOBBER5, __HYPERCALL_ARG5REG
#define __HYPERCALL_CLOBBER3	__HYPERCALL_CLOBBER4, __HYPERCALL_ARG4REG
#define __HYPERCALL_CLOBBER2	__HYPERCALL_CLOBBER3, __HYPERCALL_ARG3REG
#define __HYPERCALL_CLOBBER1	__HYPERCALL_CLOBBER2, __HYPERCALL_ARG2REG
#define __HYPERCALL_CLOBBER0	__HYPERCALL_CLOBBER1, __HYPERCALL_ARG1REG

#define _hypercall0(type, name)						\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_0ARG();						\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_0PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER0);				\
	(type)__res;							\
})

#define _hypercall1(type, name, a1)					\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_1ARG(a1);						\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_1PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER1);				\
	(type)__res;							\
})

#define _hypercall2(type, name, a1, a2)					\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_2ARG(a1, a2);					\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_2PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER2);				\
	(type)__res;							\
})

#define _hypercall3(type, name, a1, a2, a3)				\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_3ARG(a1, a2, a3);					\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_3PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER3);				\
	(type)__res;							\
})

#define _hypercall4(type, name, a1, a2, a3, a4)				\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_4ARG(a1, a2, a3, a4);				\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_4PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER4);				\
	(type)__res;							\
})

#define _hypercall5(type, name, a1, a2, a3, a4, a5)			\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);				\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_5PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER5);				\
	(type)__res;							\
})

static inline long
privcmd_call(unsigned call,
	     unsigned long a1, unsigned long a2,
	     unsigned long a3, unsigned long a4,
	     unsigned long a5)
{
	__HYPERCALL_DECLS;
	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);

	asm volatile("call *%[call]"
		     : __HYPERCALL_5PARAM
		     : [call] "a" (&hypercall_page[call])
		     : __HYPERCALL_CLOBBER5);

	return (long)__res;
}
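
/*
 * Example (illustrative sketch, not part of this header): privcmd_call()
 * exists for the case where the hypercall number is only known at run
 * time, e.g. when the privcmd driver forwards an IOCTL_PRIVCMD_HYPERCALL
 * request from the toolstack.  The struct privcmd_hypercall below is
 * assumed to have been copied in from user space:
 *
 *	struct privcmd_hypercall hcall;
 *	long ret;
 *
 *	ret = privcmd_call(hcall.op,
 *			   hcall.arg[0], hcall.arg[1], hcall.arg[2],
 *			   hcall.arg[3], hcall.arg[4]);
 *
 * The _hypercallN() macros above, by contrast, bake the hypercall number
 * into the call instruction as an immediate offset into hypercall_page.
 */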

static inline int
HYPERVISOR_set_trap_table(struct trap_info *table)
{
	return _hypercall1(int, set_trap_table, table);
}

static inline int
HYPERVISOR_mmu_update(struct mmu_update *req, int count,
		      int *success_count, domid_t domid)
{
	return _hypercall4(int, mmu_update, req, count, success_count, domid);
}

static inline int
HYPERVISOR_mmuext_op(struct mmuext_op *op, int count,
		     int *success_count, domid_t domid)
{
	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
}

static inline int
HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
{
	return _hypercall2(int, set_gdt, frame_list, entries);
}

static inline int
HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
{
	return _hypercall2(int, stack_switch, ss, esp);
}

#ifdef CONFIG_X86_32
static inline int
HYPERVISOR_set_callbacks(unsigned long event_selector,
			 unsigned long event_address,
			 unsigned long failsafe_selector,
			 unsigned long failsafe_address)
{
	return _hypercall4(int, set_callbacks,
			   event_selector, event_address,
			   failsafe_selector, failsafe_address);
}
#else /* CONFIG_X86_64 */
static inline int
HYPERVISOR_set_callbacks(unsigned long event_address,
			 unsigned long failsafe_address,
			 unsigned long syscall_address)
{
	return _hypercall3(int, set_callbacks,
			   event_address, failsafe_address,
			   syscall_address);
}
#endif /* CONFIG_X86_{32,64} */

static inline int
HYPERVISOR_callback_op(int cmd, void *arg)
{
	return _hypercall2(int, callback_op, cmd, arg);
}

static inline int
HYPERVISOR_fpu_taskswitch(int set)
{
	return _hypercall1(int, fpu_taskswitch, set);
}

static inline int
HYPERVISOR_sched_op(int cmd, void *arg)
{
	return _hypercall2(int, sched_op, cmd, arg);
}

static inline long
HYPERVISOR_set_timer_op(u64 timeout)
{
	unsigned long timeout_hi = (unsigned long)(timeout>>32);
	unsigned long timeout_lo = (unsigned long)timeout;
	return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
}

static inline int
HYPERVISOR_mca(struct xen_mc *mc_op)
{
	mc_op->interface_version = XEN_MCA_INTERFACE_VERSION;
	return _hypercall1(int, mca, mc_op);
}

static inline int
HYPERVISOR_dom0_op(struct xen_platform_op *platform_op)
{
	platform_op->interface_version = XENPF_INTERFACE_VERSION;
	return _hypercall1(int, dom0_op, platform_op);
}

static inline int
HYPERVISOR_set_debugreg(int reg, unsigned long value)
{
	return _hypercall2(int, set_debugreg, reg, value);
}

static inline unsigned long
HYPERVISOR_get_debugreg(int reg)
{
	return _hypercall1(unsigned long, get_debugreg, reg);
}

static inline int
HYPERVISOR_update_descriptor(u64 ma, u64 desc)
{
	if (sizeof(u64) == sizeof(long))
		return _hypercall2(int, update_descriptor, ma, desc);
	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
}

static inline int
HYPERVISOR_memory_op(unsigned int cmd, void *arg)
{
	return _hypercall2(int, memory_op, cmd, arg);
}

static inline int
HYPERVISOR_multicall(void *call_list, uint32_t nr_calls)
{
	return _hypercall2(int, multicall, call_list, nr_calls);
}

static inline int
HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
			     unsigned long flags)
{
	if (sizeof(new_val) == sizeof(long))
		return _hypercall3(int, update_va_mapping, va,
				   new_val.pte, flags);
	else
		return _hypercall4(int, update_va_mapping, va,
				   new_val.pte, new_val.pte >> 32, flags);
}
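
/*
 * Example (illustrative sketch): remapping one kernel virtual address
 * read-only and flushing the local TLB entry for it.  UVMF_INVLPG comes
 * from xen/interface/xen.h; va and pfn are assumed to be supplied by the
 * caller:
 *
 *	if (HYPERVISOR_update_va_mapping(va, pfn_pte(pfn, PAGE_KERNEL_RO),
 *					 UVMF_INVLPG))
 *		BUG();
 *
 * On a 32-bit PAE kernel the pte does not fit in a single long, which is
 * why the wrapper above splits it across two hypercall arguments.
 */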

extern int __must_check xen_event_channel_op_compat(int, void *);

static inline int
HYPERVISOR_event_channel_op(int cmd, void *arg)
{
	int rc = _hypercall2(int, event_channel_op, cmd, arg);
	if (unlikely(rc == -ENOSYS))
		rc = xen_event_channel_op_compat(cmd, arg);
	return rc;
}

static inline int
HYPERVISOR_xen_version(int cmd, void *arg)
{
	return _hypercall2(int, xen_version, cmd, arg);
}

static inline int
HYPERVISOR_console_io(int cmd, int count, char *str)
{
	return _hypercall3(int, console_io, cmd, count, str);
}

extern int __must_check xen_physdev_op_compat(int, void *);

static inline int
HYPERVISOR_physdev_op(int cmd, void *arg)
{
	int rc = _hypercall2(int, physdev_op, cmd, arg);
	if (unlikely(rc == -ENOSYS))
		rc = xen_physdev_op_compat(cmd, arg);
	return rc;
}

static inline int
HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
{
	return _hypercall3(int, grant_table_op, cmd, uop, count);
}

static inline int
HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val,
					 unsigned long flags, domid_t domid)
{
	if (sizeof(new_val) == sizeof(long))
		return _hypercall4(int, update_va_mapping_otherdomain, va,
				   new_val.pte, flags, domid);
	else
		return _hypercall5(int, update_va_mapping_otherdomain, va,
				   new_val.pte, new_val.pte >> 32,
				   flags, domid);
}

static inline int
HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
{
	return _hypercall2(int, vm_assist, cmd, type);
}

static inline int
HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
{
	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
}

#ifdef CONFIG_X86_64
static inline int
HYPERVISOR_set_segment_base(int reg, unsigned long value)
{
	return _hypercall2(int, set_segment_base, reg, value);
}
#endif

static inline int
HYPERVISOR_suspend(unsigned long start_info_mfn)
{
	struct sched_shutdown r = { .reason = SHUTDOWN_suspend };

	/*
	 * For a PV guest the tools require that the start_info mfn be
	 * present in rdx/edx when the hypercall is made.  Per the
	 * hypercall calling convention this is the third hypercall
	 * argument, which is start_info_mfn here.
	 */
	return _hypercall3(int, sched_op, SCHEDOP_shutdown, &r, start_info_mfn);
}

static inline int
HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
{
	return _hypercall2(int, nmi_op, op, arg);
}

static inline unsigned long __must_check
HYPERVISOR_hvm_op(int op, void *arg)
{
	return _hypercall2(unsigned long, hvm_op, op, arg);
}

static inline int
HYPERVISOR_tmem_op(struct tmem_op *op)
{
	return _hypercall1(int, tmem_op, op);
}
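
/*
 * Example (illustrative sketch): notifying a remote domain over an
 * already-bound event channel.  EVTCHNOP_send and struct evtchn_send are
 * assumed from xen/interface/event_channel.h, which callers include
 * themselves:
 *
 *	struct evtchn_send send = { .port = port };
 *
 *	(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
 *
 * If the hypervisor does not know the event_channel_op hypercall, the
 * wrapper above sees -ENOSYS and retries via xen_event_channel_op_compat().
 */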

static inline void
MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
{
	mcl->op = __HYPERVISOR_fpu_taskswitch;
	mcl->args[0] = set;

	trace_xen_mc_entry(mcl, 1);
}

static inline void
MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
			pte_t new_val, unsigned long flags)
{
	mcl->op = __HYPERVISOR_update_va_mapping;
	mcl->args[0] = va;
	if (sizeof(new_val) == sizeof(long)) {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = flags;
	} else {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = new_val.pte >> 32;
		mcl->args[3] = flags;
	}

	trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 3 : 4);
}

static inline void
MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
		     void *uop, unsigned int count)
{
	mcl->op = __HYPERVISOR_grant_table_op;
	mcl->args[0] = cmd;
	mcl->args[1] = (unsigned long)uop;
	mcl->args[2] = count;

	trace_xen_mc_entry(mcl, 3);
}

static inline void
MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long va,
				    pte_t new_val, unsigned long flags,
				    domid_t domid)
{
	mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
	mcl->args[0] = va;
	if (sizeof(new_val) == sizeof(long)) {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = flags;
		mcl->args[3] = domid;
	} else {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = new_val.pte >> 32;
		mcl->args[3] = flags;
		mcl->args[4] = domid;
	}

	trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 4 : 5);
}

static inline void
MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
			struct desc_struct desc)
{
	mcl->op = __HYPERVISOR_update_descriptor;
	if (sizeof(maddr) == sizeof(long)) {
		mcl->args[0] = maddr;
		mcl->args[1] = *(unsigned long *)&desc;
	} else {
		mcl->args[0] = maddr;
		mcl->args[1] = maddr >> 32;
		mcl->args[2] = desc.a;
		mcl->args[3] = desc.b;
	}

	trace_xen_mc_entry(mcl, sizeof(maddr) == sizeof(long) ? 2 : 4);
}

static inline void
MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg)
{
	mcl->op = __HYPERVISOR_memory_op;
	mcl->args[0] = cmd;
	mcl->args[1] = (unsigned long)arg;

	trace_xen_mc_entry(mcl, 2);
}

static inline void
MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
		 int count, int *success_count, domid_t domid)
{
	mcl->op = __HYPERVISOR_mmu_update;
	mcl->args[0] = (unsigned long)req;
	mcl->args[1] = count;
	mcl->args[2] = (unsigned long)success_count;
	mcl->args[3] = domid;

	trace_xen_mc_entry(mcl, 4);
}

static inline void
MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
		int *success_count, domid_t domid)
{
	mcl->op = __HYPERVISOR_mmuext_op;
	mcl->args[0] = (unsigned long)op;
	mcl->args[1] = count;
	mcl->args[2] = (unsigned long)success_count;
	mcl->args[3] = domid;

	trace_xen_mc_entry(mcl, 4);
}

static inline void
MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries)
{
	mcl->op = __HYPERVISOR_set_gdt;
	mcl->args[0] = (unsigned long)frames;
	mcl->args[1] = entries;

	trace_xen_mc_entry(mcl, 2);
}

static inline void
MULTI_stack_switch(struct multicall_entry *mcl,
		   unsigned long ss, unsigned long esp)
{
	mcl->op = __HYPERVISOR_stack_switch;
	mcl->args[0] = ss;
	mcl->args[1] = esp;

	trace_xen_mc_entry(mcl, 2);
}
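
/*
 * Example (illustrative sketch): the MULTI_*() helpers above only fill in
 * a struct multicall_entry; the batch still has to be submitted with
 * HYPERVISOR_multicall().  Real callers go through the per-cpu batching
 * code in arch/x86/xen/multicalls.c rather than a local array, and next_sp
 * is assumed to be supplied by the caller:
 *
 *	struct multicall_entry mcl[2];
 *
 *	MULTI_fpu_taskswitch(&mcl[0], 1);
 *	MULTI_stack_switch(&mcl[1], __KERNEL_DS, next_sp);
 *
 *	if (HYPERVISOR_multicall(mcl, 2))
 *		BUG();
 *
 * Each entry's status is returned in mcl[i].result.
 */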

#endif /* _ASM_X86_XEN_HYPERCALL_H */