/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 */

	.code32
	.text
#define _pa(x) ((x) - __START_KERNEL_map)

#include <linux/elfnote.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/asm.h>
#include <asm/boot.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>
#include <xen/interface/elfnote.h>

	__HEAD

/*
 * Entry point for PVH guests.
 *
 * The Xen ABI specifies the following register state when we come here:
 *
 * - `ebx`: contains the physical memory address where the loader has placed
 *          the boot start info structure.
 * - `cr0`: bit 0 (PE) must be set. All the other writeable bits are cleared.
 * - `cr4`: all bits are cleared.
 * - `cs`: must be a 32-bit read/execute code segment with a base of `0`
 *         and a limit of `0xFFFFFFFF`. The selector value is unspecified.
 * - `ds`, `es`: must be a 32-bit read/write data segment with a base of
 *               `0` and a limit of `0xFFFFFFFF`. The selector values are all
 *               unspecified.
 * - `tr`: must be a 32-bit TSS (active) with a base of `0` and a limit
 *         of `0x67`.
 * - `eflags`: bit 17 (VM) must be cleared. Bit 9 (IF) must be cleared.
 *             Bit 8 (TF) must be cleared. Other bits are all unspecified.
 *
 * All other processor registers and flag bits are unspecified. The OS is in
 * charge of setting up its own stack, GDT and IDT.
 */

#define PVH_GDT_ENTRY_CS	1
#define PVH_GDT_ENTRY_DS	2
#define PVH_CS_SEL		(PVH_GDT_ENTRY_CS * 8)
#define PVH_DS_SEL		(PVH_GDT_ENTRY_DS * 8)

SYM_CODE_START_LOCAL(pvh_start_xen)
	UNWIND_HINT_END_OF_STACK
	cld

	/* Load the boot GDT and switch to our own data segments. */
	lgdt (_pa(gdt))

	mov $PVH_DS_SEL, %eax
	mov %eax, %ds
	mov %eax, %es
	mov %eax, %ss

	/* Stash hvm_start_info. */
	mov $_pa(pvh_start_info), %edi
	mov %ebx, %esi
	mov _pa(pvh_start_info_sz), %ecx
	shr $2, %ecx		/* byte count -> longs for movsl */
	rep
	movsl

	mov $_pa(early_stack_end), %esp

	/* Enable PAE mode. */
	mov %cr4, %eax
	orl $X86_CR4_PAE, %eax
	mov %eax, %cr4

#ifdef CONFIG_X86_64
	/* Enable Long mode. */
	mov $MSR_EFER, %ecx
	rdmsr
	btsl $_EFER_LME, %eax
	wrmsr

	/* Enable pre-constructed page tables. */
	mov $_pa(init_top_pgt), %eax
	mov %eax, %cr3
	mov $(X86_CR0_PG | X86_CR0_PE), %eax
	mov %eax, %cr0

	/* Jump to 64-bit mode. */
	ljmp $PVH_CS_SEL, $_pa(1f)

	/* 64-bit entry point. */
	.code64
1:
	/* Set base address in stack canary descriptor. */
	mov $MSR_GS_BASE, %ecx
	mov $_pa(canary), %eax
	xor %edx, %edx
	wrmsr

	/*
	 * Calculate load offset and store in phys_base. __pa() needs
	 * phys_base set to calculate the hypercall page in xen_pvh_init().
	 */
	movq %rbp, %rbx
	subq $_pa(pvh_start_xen), %rbx
	movq %rbx, phys_base(%rip)

	/* Call xen_prepare_pvh() via the kernel virtual mapping. */
	leaq xen_prepare_pvh(%rip), %rax
	subq phys_base(%rip), %rax
	addq $__START_KERNEL_map, %rax
	ANNOTATE_RETPOLINE_SAFE
	call *%rax

	/*
	 * Clear phys_base. __startup_64 will *add* to its value,
	 * so reset it to 0.
	 */
	xor %rbx, %rbx
	movq %rbx, phys_base(%rip)

	/* startup_64 expects boot_params in %rsi. */
	mov $_pa(pvh_bootparams), %rsi
	mov $_pa(startup_64), %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp *%rax

#else /* CONFIG_X86_64 */

	/* Build the early page tables and turn on paging. */
	call mk_early_pgtbl_32

	mov $_pa(initial_page_table), %eax
	mov %eax, %cr3

	mov %cr0, %eax
	or $(X86_CR0_PG | X86_CR0_PE), %eax
	mov %eax, %cr0

	ljmp $PVH_CS_SEL, $1f
1:
	call xen_prepare_pvh
	mov $_pa(pvh_bootparams), %esi

	/* startup_32 doesn't expect paging and PAE to be on. */
	ljmp $PVH_CS_SEL, $_pa(2f)
2:
	mov %cr0, %eax
	and $~X86_CR0_PG, %eax
	mov %eax, %cr0
	mov %cr4, %eax
	and $~X86_CR4_PAE, %eax
	mov %eax, %cr4

	ljmp $PVH_CS_SEL, $_pa(startup_32)
#endif
SYM_CODE_END(pvh_start_xen)

	.section ".init.data","aw"
	.balign 8
SYM_DATA_START_LOCAL(gdt)
	.word gdt_end - gdt_start
	.long _pa(gdt_start)
	.word 0
SYM_DATA_END(gdt)
SYM_DATA_START_LOCAL(gdt_start)
	.quad 0x0000000000000000		/* NULL descriptor */
#ifdef CONFIG_X86_64
	.quad GDT_ENTRY(0xa09a, 0, 0xfffff)	/* PVH_CS_SEL: 64-bit code */
#else
	.quad GDT_ENTRY(0xc09a, 0, 0xfffff)	/* PVH_CS_SEL: 32-bit code */
#endif
	.quad GDT_ENTRY(0xc092, 0, 0xfffff)	/* PVH_DS_SEL: 32-bit data */
SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end)

	.balign 16
SYM_DATA_LOCAL(canary, .fill 48, 1, 0)

SYM_DATA_START_LOCAL(early_stack)
	.fill BOOT_STACK_SIZE, 1, 0
SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end)

	ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
		     _ASM_PTR (pvh_start_xen - __START_KERNEL_map))
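For reference, the "boot start info structure" handed over in `%ebx` (and copied into pvh_start_info in the "Stash hvm_start_info" step above) follows Xen's PVH start-info ABI. Below is a minimal C sketch of the version 0 layout, assuming the field order from include/xen/interface/hvm/start_info.h; that header is the authoritative definition, and version 1 of the ABI appends optional memory-map fields after these.

/*
 * Sketch of the PVH start info block passed to pvh_start_xen in %ebx.
 * Version 0 fields only; see include/xen/interface/hvm/start_info.h
 * for the canonical definition.
 */
#include <stdint.h>

struct hvm_start_info {
	uint32_t magic;		/* Must be 0x336ec578 ("xEn3", little endian) */
	uint32_t version;	/* Version of this structure */
	uint32_t flags;		/* SIF_xxx flags */
	uint32_t nr_modules;	/* Number of modules passed to the kernel */
	uint64_t modlist_paddr;	/* Physical address of the module list */
	uint64_t cmdline_paddr;	/* Physical address of the command line */
	uint64_t rsdp_paddr;	/* Physical address of the ACPI RSDP */
};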