1 // SPDX-License-Identifier: GPL-2.0 2 3 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG 4 #include <linux/memblock.h> 5 #endif 6 #include <linux/console.h> 7 #include <linux/cpu.h> 8 #include <linux/kexec.h> 9 #include <linux/memblock.h> 10 #include <linux/slab.h> 11 #include <linux/panic_notifier.h> 12 13 #include <xen/xen.h> 14 #include <xen/features.h> 15 #include <xen/interface/sched.h> 16 #include <xen/interface/version.h> 17 #include <xen/page.h> 18 19 #include <asm/xen/hypercall.h> 20 #include <asm/xen/hypervisor.h> 21 #include <asm/cpu.h> 22 #include <asm/e820/api.h> 23 #include <asm/setup.h> 24 25 #include "xen-ops.h" 26 #include "smp.h" 27 #include "pmu.h" 28 29 EXPORT_SYMBOL_GPL(hypercall_page); 30 31 /* 32 * Pointer to the xen_vcpu_info structure or 33 * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info 34 * and xen_vcpu_setup for details. By default it points to share_info->vcpu_info 35 * but during boot it is switched to point to xen_vcpu_info. 36 * The pointer is used in xen_evtchn_do_upcall to acknowledge pending events. 37 * Make sure that xen_vcpu_info doesn't cross a page boundary by making it 38 * cache-line aligned (the struct is guaranteed to have a size of 64 bytes, 39 * which matches the cache line size of 64-bit x86 processors). 40 */ 41 DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); 42 DEFINE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info); 43 44 /* Linux <-> Xen vCPU id mapping */ 45 DEFINE_PER_CPU(uint32_t, xen_vcpu_id); 46 EXPORT_PER_CPU_SYMBOL(xen_vcpu_id); 47 48 unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START; 49 EXPORT_SYMBOL(machine_to_phys_mapping); 50 unsigned long machine_to_phys_nr; 51 EXPORT_SYMBOL(machine_to_phys_nr); 52 53 struct start_info *xen_start_info; 54 EXPORT_SYMBOL_GPL(xen_start_info); 55 56 struct shared_info xen_dummy_shared_info; 57 58 __read_mostly bool xen_have_vector_callback = true; 59 EXPORT_SYMBOL_GPL(xen_have_vector_callback); 60 61 /* 62 * NB: These need to live in .data or alike because they're used by 63 * xen_prepare_pvh() which runs before clearing the bss. 64 */ 65 enum xen_domain_type __ro_after_init xen_domain_type = XEN_NATIVE; 66 EXPORT_SYMBOL_GPL(xen_domain_type); 67 uint32_t __ro_after_init xen_start_flags; 68 EXPORT_SYMBOL(xen_start_flags); 69 70 /* 71 * Point at some empty memory to start with. We map the real shared_info 72 * page as soon as fixmap is up and running. 73 */ 74 struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info; 75 76 static int xen_cpu_up_online(unsigned int cpu) 77 { 78 xen_init_lock_cpu(cpu); 79 return 0; 80 } 81 82 int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int), 83 int (*cpu_dead_cb)(unsigned int)) 84 { 85 int rc; 86 87 rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE, 88 "x86/xen/guest:prepare", 89 cpu_up_prepare_cb, cpu_dead_cb); 90 if (rc >= 0) { 91 rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 92 "x86/xen/guest:online", 93 xen_cpu_up_online, NULL); 94 if (rc < 0) 95 cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE); 96 } 97 98 return rc >= 0 ? 0 : rc; 99 } 100 101 static void xen_vcpu_setup_restore(int cpu) 102 { 103 /* Any per_cpu(xen_vcpu) is stale, so reset it */ 104 xen_vcpu_info_reset(cpu); 105 106 /* 107 * For PVH and PVHVM, setup online VCPUs only. The rest will 108 * be handled by hotplug. 109 */ 110 if (xen_pv_domain() || 111 (xen_hvm_domain() && cpu_online(cpu))) 112 xen_vcpu_setup(cpu); 113 } 114 115 /* 116 * On restore, set the vcpu placement up again. 117 * If it fails, then we're in a bad state, since 118 * we can't back out from using it... 119 */ 120 void xen_vcpu_restore(void) 121 { 122 int cpu; 123 124 for_each_possible_cpu(cpu) { 125 bool other_cpu = (cpu != smp_processor_id()); 126 bool is_up; 127 128 if (xen_vcpu_nr(cpu) == XEN_VCPU_ID_INVALID) 129 continue; 130 131 /* Only Xen 4.5 and higher support this. */ 132 is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, 133 xen_vcpu_nr(cpu), NULL) > 0; 134 135 if (other_cpu && is_up && 136 HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL)) 137 BUG(); 138 139 if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock)) 140 xen_setup_runstate_info(cpu); 141 142 xen_vcpu_setup_restore(cpu); 143 144 if (other_cpu && is_up && 145 HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL)) 146 BUG(); 147 } 148 } 149 150 void xen_vcpu_info_reset(int cpu) 151 { 152 if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS) { 153 per_cpu(xen_vcpu, cpu) = 154 &HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)]; 155 } else { 156 /* Set to NULL so that if somebody accesses it we get an OOPS */ 157 per_cpu(xen_vcpu, cpu) = NULL; 158 } 159 } 160 161 void xen_vcpu_setup(int cpu) 162 { 163 struct vcpu_register_vcpu_info info; 164 int err; 165 struct vcpu_info *vcpup; 166 167 BUILD_BUG_ON(sizeof(*vcpup) > SMP_CACHE_BYTES); 168 BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); 169 170 /* 171 * This path is called on PVHVM at bootup (xen_hvm_smp_prepare_boot_cpu) 172 * and at restore (xen_vcpu_restore). Also called for hotplugged 173 * VCPUs (cpu_init -> xen_hvm_cpu_prepare_hvm). 174 * However, the hypercall can only be done once (see below) so if a VCPU 175 * is offlined and comes back online then let's not redo the hypercall. 176 * 177 * For PV it is called during restore (xen_vcpu_restore) and bootup 178 * (xen_setup_vcpu_info_placement). The hotplug mechanism does not 179 * use this function. 180 */ 181 if (xen_hvm_domain()) { 182 if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu)) 183 return; 184 } 185 186 vcpup = &per_cpu(xen_vcpu_info, cpu); 187 info.mfn = arbitrary_virt_to_mfn(vcpup); 188 info.offset = offset_in_page(vcpup); 189 190 /* 191 * N.B. This hypercall can _only_ be called once per CPU. 192 * Subsequent calls will error out with -EINVAL. This is due to 193 * the fact that hypervisor has no unregister variant and this 194 * hypercall does not allow to over-write info.mfn and 195 * info.offset. 196 */ 197 err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu), 198 &info); 199 if (err) 200 panic("register_vcpu_info failed: cpu=%d err=%d\n", cpu, err); 201 202 per_cpu(xen_vcpu, cpu) = vcpup; 203 } 204 205 void __init xen_banner(void) 206 { 207 unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL); 208 struct xen_extraversion extra; 209 210 HYPERVISOR_xen_version(XENVER_extraversion, &extra); 211 212 pr_info("Booting kernel on %s\n", pv_info.name); 213 pr_info("Xen version: %u.%u%s%s\n", 214 version >> 16, version & 0xffff, extra.extraversion, 215 xen_feature(XENFEAT_mmu_pt_update_preserve_ad) 216 ? " (preserve-AD)" : ""); 217 } 218 219 /* Check if running on Xen version (major, minor) or later */ 220 bool xen_running_on_version_or_later(unsigned int major, unsigned int minor) 221 { 222 unsigned int version; 223 224 if (!xen_domain()) 225 return false; 226 227 version = HYPERVISOR_xen_version(XENVER_version, NULL); 228 if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) || 229 ((version >> 16) > major)) 230 return true; 231 return false; 232 } 233 234 void __init xen_add_preferred_consoles(void) 235 { 236 add_preferred_console("xenboot", 0, NULL); 237 if (!boot_params.screen_info.orig_video_isVGA) 238 add_preferred_console("tty", 0, NULL); 239 add_preferred_console("hvc", 0, NULL); 240 if (boot_params.screen_info.orig_video_isVGA) 241 add_preferred_console("tty", 0, NULL); 242 } 243 244 void xen_reboot(int reason) 245 { 246 struct sched_shutdown r = { .reason = reason }; 247 int cpu; 248 249 for_each_online_cpu(cpu) 250 xen_pmu_finish(cpu); 251 252 if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r)) 253 BUG(); 254 } 255 256 static int reboot_reason = SHUTDOWN_reboot; 257 static bool xen_legacy_crash; 258 void xen_emergency_restart(void) 259 { 260 xen_reboot(reboot_reason); 261 } 262 263 static int 264 xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) 265 { 266 if (!kexec_crash_loaded()) { 267 if (xen_legacy_crash) 268 xen_reboot(SHUTDOWN_crash); 269 270 reboot_reason = SHUTDOWN_crash; 271 272 /* 273 * If panic_timeout==0 then we are supposed to wait forever. 274 * However, to preserve original dom0 behavior we have to drop 275 * into hypervisor. (domU behavior is controlled by its 276 * config file) 277 */ 278 if (panic_timeout == 0) 279 panic_timeout = -1; 280 } 281 return NOTIFY_DONE; 282 } 283 284 static int __init parse_xen_legacy_crash(char *arg) 285 { 286 xen_legacy_crash = true; 287 return 0; 288 } 289 early_param("xen_legacy_crash", parse_xen_legacy_crash); 290 291 static struct notifier_block xen_panic_block = { 292 .notifier_call = xen_panic_event, 293 .priority = INT_MIN 294 }; 295 296 int xen_panic_handler_init(void) 297 { 298 atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block); 299 return 0; 300 } 301 302 void xen_pin_vcpu(int cpu) 303 { 304 static bool disable_pinning; 305 struct sched_pin_override pin_override; 306 int ret; 307 308 if (disable_pinning) 309 return; 310 311 pin_override.pcpu = cpu; 312 ret = HYPERVISOR_sched_op(SCHEDOP_pin_override, &pin_override); 313 314 /* Ignore errors when removing override. */ 315 if (cpu < 0) 316 return; 317 318 switch (ret) { 319 case -ENOSYS: 320 pr_warn("Unable to pin on physical cpu %d. In case of problems consider vcpu pinning.\n", 321 cpu); 322 disable_pinning = true; 323 break; 324 case -EPERM: 325 WARN(1, "Trying to pin vcpu without having privilege to do so\n"); 326 disable_pinning = true; 327 break; 328 case -EINVAL: 329 case -EBUSY: 330 pr_warn("Physical cpu %d not available for pinning. Check Xen cpu configuration.\n", 331 cpu); 332 break; 333 case 0: 334 break; 335 default: 336 WARN(1, "rc %d while trying to pin vcpu\n", ret); 337 disable_pinning = true; 338 } 339 } 340 341 #ifdef CONFIG_HOTPLUG_CPU 342 void xen_arch_register_cpu(int num) 343 { 344 arch_register_cpu(num); 345 } 346 EXPORT_SYMBOL(xen_arch_register_cpu); 347 348 void xen_arch_unregister_cpu(int num) 349 { 350 arch_unregister_cpu(num); 351 } 352 EXPORT_SYMBOL(xen_arch_unregister_cpu); 353 #endif 354 355 /* Amount of extra memory space we add to the e820 ranges */ 356 struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata; 357 358 void __init xen_add_extra_mem(unsigned long start_pfn, unsigned long n_pfns) 359 { 360 unsigned int i; 361 362 /* 363 * No need to check for zero size, should happen rarely and will only 364 * write a new entry regarded to be unused due to zero size. 365 */ 366 for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { 367 /* Add new region. */ 368 if (xen_extra_mem[i].n_pfns == 0) { 369 xen_extra_mem[i].start_pfn = start_pfn; 370 xen_extra_mem[i].n_pfns = n_pfns; 371 break; 372 } 373 /* Append to existing region. */ 374 if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns == 375 start_pfn) { 376 xen_extra_mem[i].n_pfns += n_pfns; 377 break; 378 } 379 } 380 if (i == XEN_EXTRA_MEM_MAX_REGIONS) 381 printk(KERN_WARNING "Warning: not enough extra memory regions\n"); 382 383 memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns)); 384 } 385 386 #ifdef CONFIG_XEN_UNPOPULATED_ALLOC 387 int __init arch_xen_unpopulated_init(struct resource **res) 388 { 389 unsigned int i; 390 391 if (!xen_domain()) 392 return -ENODEV; 393 394 /* Must be set strictly before calling xen_free_unpopulated_pages(). */ 395 *res = &iomem_resource; 396 397 /* 398 * Initialize with pages from the extra memory regions (see 399 * arch/x86/xen/setup.c). 400 */ 401 for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { 402 unsigned int j; 403 404 for (j = 0; j < xen_extra_mem[i].n_pfns; j++) { 405 struct page *pg = 406 pfn_to_page(xen_extra_mem[i].start_pfn + j); 407 408 xen_free_unpopulated_pages(1, &pg); 409 } 410 411 /* Zero so region is not also added to the balloon driver. */ 412 xen_extra_mem[i].n_pfns = 0; 413 } 414 415 return 0; 416 } 417 #endif 418