1af873fceSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 282869ac5SJames Morse /*: 382869ac5SJames Morse * Hibernate support specific for ARM64 482869ac5SJames Morse * 582869ac5SJames Morse * Derived from work on ARM hibernation support by: 682869ac5SJames Morse * 782869ac5SJames Morse * Ubuntu project, hibernation support for mach-dove 882869ac5SJames Morse * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu) 982869ac5SJames Morse * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.) 1082869ac5SJames Morse * https://lkml.org/lkml/2010/6/18/4 1182869ac5SJames Morse * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html 1282869ac5SJames Morse * https://patchwork.kernel.org/patch/96442/ 1382869ac5SJames Morse * 1482869ac5SJames Morse * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> 1582869ac5SJames Morse */ 1682869ac5SJames Morse #define pr_fmt(x) "hibernate: " x 178ec058fdSJames Morse #include <linux/cpu.h> 1882869ac5SJames Morse #include <linux/kvm_host.h> 1982869ac5SJames Morse #include <linux/pm.h> 2082869ac5SJames Morse #include <linux/sched.h> 2182869ac5SJames Morse #include <linux/suspend.h> 2282869ac5SJames Morse #include <linux/utsname.h> 2382869ac5SJames Morse 2482869ac5SJames Morse #include <asm/barrier.h> 2582869ac5SJames Morse #include <asm/cacheflush.h> 268ec058fdSJames Morse #include <asm/cputype.h> 270fbeb318SJames Morse #include <asm/daifflags.h> 2882869ac5SJames Morse #include <asm/irqflags.h> 29254a41c0SAKASHI Takahiro #include <asm/kexec.h> 3082869ac5SJames Morse #include <asm/memory.h> 3182869ac5SJames Morse #include <asm/mmu_context.h> 32ee11f332SSteven Price #include <asm/mte.h> 3382869ac5SJames Morse #include <asm/sections.h> 34d74b4e4fSJames Morse #include <asm/smp.h> 358ec058fdSJames Morse #include <asm/smp_plat.h> 3682869ac5SJames Morse #include <asm/suspend.h> 370194e760SMark Rutland #include <asm/sysreg.h> 38072e3d96SPavel Tatashin #include <asm/trans_pgd.h> 3982869ac5SJames Morse 
#include <asm/virt.h>

/*
 * Hibernate core relies on this value being 0 on resume, and marks it
 * __nosavedata assuming it will keep the resume kernel's '0' value. This
 * doesn't happen with either KASLR.
 *
 * defined as "__visible int in_suspend __nosavedata" in
 * kernel/power/hibernate.c
 */
extern int in_suspend;

/* Do we need to reset el2? Only when we booted at EL2 without VHE. */
#define el2_reset_needed() (is_hyp_nvhe())

/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
extern char __hyp_stub_vectors[];

/*
 * The logical cpu number we should resume on, initialised to a non-cpu
 * number.
 */
static int sleep_cpu = -EINVAL;

/*
 * Values that may not change over hibernate/resume. We put the build number
 * and date in here so that we guarantee not to resume with a different
 * kernel.
 */
struct arch_hibernate_hdr_invariants {
	char		uts_version[__NEW_UTS_LEN + 1];
};

/* These values need to be known across a hibernate/restore. */
*/ 7382869ac5SJames Morse static struct arch_hibernate_hdr { 7482869ac5SJames Morse struct arch_hibernate_hdr_invariants invariants; 7582869ac5SJames Morse 7682869ac5SJames Morse /* These are needed to find the relocated kernel if built with kaslr */ 7782869ac5SJames Morse phys_addr_t ttbr1_el1; 7882869ac5SJames Morse void (*reenter_kernel)(void); 7982869ac5SJames Morse 8082869ac5SJames Morse /* 8182869ac5SJames Morse * We need to know where the __hyp_stub_vectors are after restore to 8282869ac5SJames Morse * re-configure el2. 8382869ac5SJames Morse */ 8482869ac5SJames Morse phys_addr_t __hyp_stub_vectors; 858ec058fdSJames Morse 868ec058fdSJames Morse u64 sleep_cpu_mpidr; 8782869ac5SJames Morse } resume_hdr; 8882869ac5SJames Morse 8982869ac5SJames Morse static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i) 9082869ac5SJames Morse { 9182869ac5SJames Morse memset(i, 0, sizeof(*i)); 9282869ac5SJames Morse memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version)); 9382869ac5SJames Morse } 9482869ac5SJames Morse 9582869ac5SJames Morse int pfn_is_nosave(unsigned long pfn) 9682869ac5SJames Morse { 972077be67SLaura Abbott unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin); 982077be67SLaura Abbott unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1); 9982869ac5SJames Morse 100254a41c0SAKASHI Takahiro return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) || 101254a41c0SAKASHI Takahiro crash_is_nosave(pfn); 10282869ac5SJames Morse } 10382869ac5SJames Morse 10482869ac5SJames Morse void notrace save_processor_state(void) 10582869ac5SJames Morse { 10682869ac5SJames Morse WARN_ON(num_online_cpus() != 1); 10782869ac5SJames Morse } 10882869ac5SJames Morse 10982869ac5SJames Morse void notrace restore_processor_state(void) 11082869ac5SJames Morse { 11182869ac5SJames Morse } 11282869ac5SJames Morse 11382869ac5SJames Morse int arch_hibernation_header_save(void *addr, unsigned int max_size) 11482869ac5SJames Morse { 
11582869ac5SJames Morse struct arch_hibernate_hdr *hdr = addr; 11682869ac5SJames Morse 11782869ac5SJames Morse if (max_size < sizeof(*hdr)) 11882869ac5SJames Morse return -EOVERFLOW; 11982869ac5SJames Morse 12082869ac5SJames Morse arch_hdr_invariants(&hdr->invariants); 1212077be67SLaura Abbott hdr->ttbr1_el1 = __pa_symbol(swapper_pg_dir); 12282869ac5SJames Morse hdr->reenter_kernel = _cpu_resume; 12382869ac5SJames Morse 12482869ac5SJames Morse /* We can't use __hyp_get_vectors() because kvm may still be loaded */ 12582869ac5SJames Morse if (el2_reset_needed()) 1262077be67SLaura Abbott hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors); 12782869ac5SJames Morse else 12882869ac5SJames Morse hdr->__hyp_stub_vectors = 0; 12982869ac5SJames Morse 1308ec058fdSJames Morse /* Save the mpidr of the cpu we called cpu_suspend() on... */ 1318ec058fdSJames Morse if (sleep_cpu < 0) { 1329165dabbSMasanari Iida pr_err("Failing to hibernate on an unknown CPU.\n"); 1338ec058fdSJames Morse return -ENODEV; 1348ec058fdSJames Morse } 1358ec058fdSJames Morse hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu); 1368ec058fdSJames Morse pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu, 1378ec058fdSJames Morse hdr->sleep_cpu_mpidr); 1388ec058fdSJames Morse 13982869ac5SJames Morse return 0; 14082869ac5SJames Morse } 14182869ac5SJames Morse EXPORT_SYMBOL(arch_hibernation_header_save); 14282869ac5SJames Morse 14382869ac5SJames Morse int arch_hibernation_header_restore(void *addr) 14482869ac5SJames Morse { 1458ec058fdSJames Morse int ret; 14682869ac5SJames Morse struct arch_hibernate_hdr_invariants invariants; 14782869ac5SJames Morse struct arch_hibernate_hdr *hdr = addr; 14882869ac5SJames Morse 14982869ac5SJames Morse arch_hdr_invariants(&invariants); 15082869ac5SJames Morse if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) { 15182869ac5SJames Morse pr_crit("Hibernate image not generated by this kernel!\n"); 15282869ac5SJames Morse return -EINVAL; 15382869ac5SJames 
Morse } 15482869ac5SJames Morse 1558ec058fdSJames Morse sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr); 1568ec058fdSJames Morse pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu, 1578ec058fdSJames Morse hdr->sleep_cpu_mpidr); 1588ec058fdSJames Morse if (sleep_cpu < 0) { 1598ec058fdSJames Morse pr_crit("Hibernated on a CPU not known to this kernel!\n"); 1608ec058fdSJames Morse sleep_cpu = -EINVAL; 1618ec058fdSJames Morse return -EINVAL; 1628ec058fdSJames Morse } 163e646ac5bSQais Yousef 164e646ac5bSQais Yousef ret = bringup_hibernate_cpu(sleep_cpu); 1658ec058fdSJames Morse if (ret) { 1668ec058fdSJames Morse sleep_cpu = -EINVAL; 1678ec058fdSJames Morse return ret; 1688ec058fdSJames Morse } 1698ec058fdSJames Morse 17082869ac5SJames Morse resume_hdr = *hdr; 17182869ac5SJames Morse 17282869ac5SJames Morse return 0; 17382869ac5SJames Morse } 17482869ac5SJames Morse EXPORT_SYMBOL(arch_hibernation_header_restore); 17582869ac5SJames Morse 17650f53fb7SPavel Tatashin static void *hibernate_page_alloc(void *arg) 17750f53fb7SPavel Tatashin { 178d1bbc35fSPavel Tatashin return (void *)get_safe_page((__force gfp_t)(unsigned long)arg); 17950f53fb7SPavel Tatashin } 18050f53fb7SPavel Tatashin 181a2c2e679SPavel Tatashin /* 182a2c2e679SPavel Tatashin * Copies length bytes, starting at src_start into an new page, 183a2c2e679SPavel Tatashin * perform cache maintenance, then maps it at the specified address low 184a2c2e679SPavel Tatashin * address as executable. 185a2c2e679SPavel Tatashin * 186a2c2e679SPavel Tatashin * This is used by hibernate to copy the code it needs to execute when 187a2c2e679SPavel Tatashin * overwriting the kernel text. This function generates a new set of page 188a2c2e679SPavel Tatashin * tables, which it loads into ttbr0. 189a2c2e679SPavel Tatashin * 190a2c2e679SPavel Tatashin * Length is provided as we probably only want 4K of data, even on a 64K 191a2c2e679SPavel Tatashin * page system. 
192a2c2e679SPavel Tatashin */ 193a2c2e679SPavel Tatashin static int create_safe_exec_page(void *src_start, size_t length, 194a2c2e679SPavel Tatashin phys_addr_t *phys_dst_addr) 195a2c2e679SPavel Tatashin { 19650f53fb7SPavel Tatashin struct trans_pgd_info trans_info = { 19750f53fb7SPavel Tatashin .trans_alloc_page = hibernate_page_alloc, 198d1bbc35fSPavel Tatashin .trans_alloc_arg = (__force void *)GFP_ATOMIC, 19950f53fb7SPavel Tatashin }; 20050f53fb7SPavel Tatashin 201a2c2e679SPavel Tatashin void *page = (void *)get_safe_page(GFP_ATOMIC); 2027018d467SJames Morse phys_addr_t trans_ttbr0; 2037018d467SJames Morse unsigned long t0sz; 204a2c2e679SPavel Tatashin int rc; 205a2c2e679SPavel Tatashin 206a2c2e679SPavel Tatashin if (!page) 207a2c2e679SPavel Tatashin return -ENOMEM; 208a2c2e679SPavel Tatashin 209a2c2e679SPavel Tatashin memcpy(page, src_start, length); 210fade9c2cSFuad Tabba caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length); 2117018d467SJames Morse rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page); 212a2c2e679SPavel Tatashin if (rc) 213a2c2e679SPavel Tatashin return rc; 214a2c2e679SPavel Tatashin 2150194e760SMark Rutland /* 2160194e760SMark Rutland * Load our new page tables. A strict BBM approach requires that we 2170194e760SMark Rutland * ensure that TLBs are free of any entries that may overlap with the 2180194e760SMark Rutland * global mappings we are about to install. 2190194e760SMark Rutland * 2200194e760SMark Rutland * For a real hibernate/resume cycle TTBR0 currently points to a zero 2210194e760SMark Rutland * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI 2220194e760SMark Rutland * runtime services), while for a userspace-driven test_resume cycle it 2230194e760SMark Rutland * points to userspace page tables (and we must point it at a zero page 2247018d467SJames Morse * ourselves). 2257018d467SJames Morse * 2267018d467SJames Morse * We change T0SZ as part of installing the idmap. 
This is undone by 2277018d467SJames Morse * cpu_uninstall_idmap() in __cpu_suspend_exit(). 2280194e760SMark Rutland */ 2290194e760SMark Rutland cpu_set_reserved_ttbr0(); 2300194e760SMark Rutland local_flush_tlb_all(); 2317018d467SJames Morse __cpu_set_tcr_t0sz(t0sz); 2327018d467SJames Morse write_sysreg(trans_ttbr0, ttbr0_el1); 2330194e760SMark Rutland isb(); 23482869ac5SJames Morse 23513373f0eSPavel Tatashin *phys_dst_addr = virt_to_phys(page); 23682869ac5SJames Morse 237a89d7ff9SPavel Tatashin return 0; 23882869ac5SJames Morse } 23982869ac5SJames Morse 240ee11f332SSteven Price #ifdef CONFIG_ARM64_MTE 241ee11f332SSteven Price 242ee11f332SSteven Price static DEFINE_XARRAY(mte_pages); 243ee11f332SSteven Price 244ee11f332SSteven Price static int save_tags(struct page *page, unsigned long pfn) 245ee11f332SSteven Price { 246ee11f332SSteven Price void *tag_storage, *ret; 247ee11f332SSteven Price 248ee11f332SSteven Price tag_storage = mte_allocate_tag_storage(); 249ee11f332SSteven Price if (!tag_storage) 250ee11f332SSteven Price return -ENOMEM; 251ee11f332SSteven Price 252ee11f332SSteven Price mte_save_page_tags(page_address(page), tag_storage); 253ee11f332SSteven Price 254ee11f332SSteven Price ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL); 255ee11f332SSteven Price if (WARN(xa_is_err(ret), "Failed to store MTE tags")) { 256ee11f332SSteven Price mte_free_tag_storage(tag_storage); 257ee11f332SSteven Price return xa_err(ret); 258ee11f332SSteven Price } else if (WARN(ret, "swsusp: %s: Duplicate entry", __func__)) { 259ee11f332SSteven Price mte_free_tag_storage(ret); 260ee11f332SSteven Price } 261ee11f332SSteven Price 262ee11f332SSteven Price return 0; 263ee11f332SSteven Price } 264ee11f332SSteven Price 265ee11f332SSteven Price static void swsusp_mte_free_storage(void) 266ee11f332SSteven Price { 267ee11f332SSteven Price XA_STATE(xa_state, &mte_pages, 0); 268ee11f332SSteven Price void *tags; 269ee11f332SSteven Price 270ee11f332SSteven Price xa_lock(&mte_pages); 
271ee11f332SSteven Price xas_for_each(&xa_state, tags, ULONG_MAX) { 272ee11f332SSteven Price mte_free_tag_storage(tags); 273ee11f332SSteven Price } 274ee11f332SSteven Price xa_unlock(&mte_pages); 275ee11f332SSteven Price 276ee11f332SSteven Price xa_destroy(&mte_pages); 277ee11f332SSteven Price } 278ee11f332SSteven Price 279ee11f332SSteven Price static int swsusp_mte_save_tags(void) 280ee11f332SSteven Price { 281ee11f332SSteven Price struct zone *zone; 282ee11f332SSteven Price unsigned long pfn, max_zone_pfn; 283ee11f332SSteven Price int ret = 0; 284ee11f332SSteven Price int n = 0; 285ee11f332SSteven Price 286ee11f332SSteven Price if (!system_supports_mte()) 287ee11f332SSteven Price return 0; 288ee11f332SSteven Price 289ee11f332SSteven Price for_each_populated_zone(zone) { 290ee11f332SSteven Price max_zone_pfn = zone_end_pfn(zone); 291ee11f332SSteven Price for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { 292ee11f332SSteven Price struct page *page = pfn_to_online_page(pfn); 293ee11f332SSteven Price 294ee11f332SSteven Price if (!page) 295ee11f332SSteven Price continue; 296ee11f332SSteven Price 297ee11f332SSteven Price if (!test_bit(PG_mte_tagged, &page->flags)) 298ee11f332SSteven Price continue; 299ee11f332SSteven Price 300ee11f332SSteven Price ret = save_tags(page, pfn); 301ee11f332SSteven Price if (ret) { 302ee11f332SSteven Price swsusp_mte_free_storage(); 303ee11f332SSteven Price goto out; 304ee11f332SSteven Price } 305ee11f332SSteven Price 306ee11f332SSteven Price n++; 307ee11f332SSteven Price } 308ee11f332SSteven Price } 309ee11f332SSteven Price pr_info("Saved %d MTE pages\n", n); 310ee11f332SSteven Price 311ee11f332SSteven Price out: 312ee11f332SSteven Price return ret; 313ee11f332SSteven Price } 314ee11f332SSteven Price 315ee11f332SSteven Price static void swsusp_mte_restore_tags(void) 316ee11f332SSteven Price { 317ee11f332SSteven Price XA_STATE(xa_state, &mte_pages, 0); 318ee11f332SSteven Price int n = 0; 319ee11f332SSteven Price void *tags; 
320ee11f332SSteven Price 321ee11f332SSteven Price xa_lock(&mte_pages); 322ee11f332SSteven Price xas_for_each(&xa_state, tags, ULONG_MAX) { 323ee11f332SSteven Price unsigned long pfn = xa_state.xa_index; 324ee11f332SSteven Price struct page *page = pfn_to_online_page(pfn); 325ee11f332SSteven Price 326e5b8d921SVincenzo Frascino /* 327e5b8d921SVincenzo Frascino * It is not required to invoke page_kasan_tag_reset(page) 328e5b8d921SVincenzo Frascino * at this point since the tags stored in page->flags are 329e5b8d921SVincenzo Frascino * already restored. 330e5b8d921SVincenzo Frascino */ 331ee11f332SSteven Price mte_restore_page_tags(page_address(page), tags); 332ee11f332SSteven Price 333ee11f332SSteven Price mte_free_tag_storage(tags); 334ee11f332SSteven Price n++; 335ee11f332SSteven Price } 336ee11f332SSteven Price xa_unlock(&mte_pages); 337ee11f332SSteven Price 338ee11f332SSteven Price pr_info("Restored %d MTE pages\n", n); 339ee11f332SSteven Price 340ee11f332SSteven Price xa_destroy(&mte_pages); 341ee11f332SSteven Price } 342ee11f332SSteven Price 343ee11f332SSteven Price #else /* CONFIG_ARM64_MTE */ 344ee11f332SSteven Price 345ee11f332SSteven Price static int swsusp_mte_save_tags(void) 346ee11f332SSteven Price { 347ee11f332SSteven Price return 0; 348ee11f332SSteven Price } 349ee11f332SSteven Price 350ee11f332SSteven Price static void swsusp_mte_restore_tags(void) 351ee11f332SSteven Price { 352ee11f332SSteven Price } 353ee11f332SSteven Price 354ee11f332SSteven Price #endif /* CONFIG_ARM64_MTE */ 355ee11f332SSteven Price 35682869ac5SJames Morse int swsusp_arch_suspend(void) 35782869ac5SJames Morse { 35882869ac5SJames Morse int ret = 0; 35982869ac5SJames Morse unsigned long flags; 36082869ac5SJames Morse struct sleep_stack_data state; 36182869ac5SJames Morse 362d74b4e4fSJames Morse if (cpus_are_stuck_in_kernel()) { 363d74b4e4fSJames Morse pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n"); 364d74b4e4fSJames Morse return -EBUSY; 365d74b4e4fSJames Morse 
} 366d74b4e4fSJames Morse 3670fbeb318SJames Morse flags = local_daif_save(); 36882869ac5SJames Morse 36982869ac5SJames Morse if (__cpu_suspend_enter(&state)) { 370254a41c0SAKASHI Takahiro /* make the crash dump kernel image visible/saveable */ 371254a41c0SAKASHI Takahiro crash_prepare_suspend(); 372254a41c0SAKASHI Takahiro 373ee11f332SSteven Price ret = swsusp_mte_save_tags(); 374ee11f332SSteven Price if (ret) 375ee11f332SSteven Price return ret; 376ee11f332SSteven Price 3778ec058fdSJames Morse sleep_cpu = smp_processor_id(); 37882869ac5SJames Morse ret = swsusp_save(); 37982869ac5SJames Morse } else { 3805ebe3a44SJames Morse /* Clean kernel core startup/idle code to PoC*/ 381fade9c2cSFuad Tabba dcache_clean_inval_poc((unsigned long)__mmuoff_data_start, 382814b1860SFuad Tabba (unsigned long)__mmuoff_data_end); 383fade9c2cSFuad Tabba dcache_clean_inval_poc((unsigned long)__idmap_text_start, 384814b1860SFuad Tabba (unsigned long)__idmap_text_end); 3855ebe3a44SJames Morse 3865ebe3a44SJames Morse /* Clean kvm setup code to PoC? 
*/ 387f7daa9c8SJames Morse if (el2_reset_needed()) { 388fade9c2cSFuad Tabba dcache_clean_inval_poc( 389814b1860SFuad Tabba (unsigned long)__hyp_idmap_text_start, 390814b1860SFuad Tabba (unsigned long)__hyp_idmap_text_end); 391fade9c2cSFuad Tabba dcache_clean_inval_poc((unsigned long)__hyp_text_start, 392814b1860SFuad Tabba (unsigned long)__hyp_text_end); 393f7daa9c8SJames Morse } 39482869ac5SJames Morse 395ee11f332SSteven Price swsusp_mte_restore_tags(); 396ee11f332SSteven Price 397254a41c0SAKASHI Takahiro /* make the crash dump kernel image protected again */ 398254a41c0SAKASHI Takahiro crash_post_resume(); 399254a41c0SAKASHI Takahiro 40082869ac5SJames Morse /* 40182869ac5SJames Morse * Tell the hibernation core that we've just restored 40282869ac5SJames Morse * the memory 40382869ac5SJames Morse */ 40482869ac5SJames Morse in_suspend = 0; 40582869ac5SJames Morse 4068ec058fdSJames Morse sleep_cpu = -EINVAL; 40782869ac5SJames Morse __cpu_suspend_exit(); 408647d0519SMarc Zyngier 409647d0519SMarc Zyngier /* 410647d0519SMarc Zyngier * Just in case the boot kernel did turn the SSBD 411647d0519SMarc Zyngier * mitigation off behind our back, let's set the state 412647d0519SMarc Zyngier * to what we expect it to be. 413647d0519SMarc Zyngier */ 414c2876207SWill Deacon spectre_v4_enable_mitigation(NULL); 41582869ac5SJames Morse } 41682869ac5SJames Morse 4170fbeb318SJames Morse local_daif_restore(flags); 41882869ac5SJames Morse 41982869ac5SJames Morse return ret; 42082869ac5SJames Morse } 42182869ac5SJames Morse 42282869ac5SJames Morse /* 42382869ac5SJames Morse * Setup then Resume from the hibernate image using swsusp_arch_suspend_exit(). 42482869ac5SJames Morse * 42582869ac5SJames Morse * Memory allocated by get_safe_page() will be dealt with by the hibernate code, 42682869ac5SJames Morse * we don't need to free it here. 
42782869ac5SJames Morse */ 42882869ac5SJames Morse int swsusp_arch_resume(void) 42982869ac5SJames Morse { 430a89d7ff9SPavel Tatashin int rc; 43182869ac5SJames Morse void *zero_page; 43282869ac5SJames Morse size_t exit_size; 43382869ac5SJames Morse pgd_t *tmp_pg_dir; 434*788bfdd9SPasha Tatashin phys_addr_t el2_vectors; 43582869ac5SJames Morse void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *, 43682869ac5SJames Morse void *, phys_addr_t, phys_addr_t); 43789d1410fSPavel Tatashin struct trans_pgd_info trans_info = { 43889d1410fSPavel Tatashin .trans_alloc_page = hibernate_page_alloc, 43989d1410fSPavel Tatashin .trans_alloc_arg = (void *)GFP_ATOMIC, 44089d1410fSPavel Tatashin }; 44182869ac5SJames Morse 44282869ac5SJames Morse /* 443dfbca61aSMark Rutland * Restoring the memory image will overwrite the ttbr1 page tables. 444dfbca61aSMark Rutland * Create a second copy of just the linear map, and use this when 445dfbca61aSMark Rutland * restoring. 446dfbca61aSMark Rutland */ 44789d1410fSPavel Tatashin rc = trans_pgd_create_copy(&trans_info, &tmp_pg_dir, PAGE_OFFSET, 44889d1410fSPavel Tatashin PAGE_END); 449dfbca61aSMark Rutland if (rc) 450a89d7ff9SPavel Tatashin return rc; 451dfbca61aSMark Rutland 452dfbca61aSMark Rutland /* 453dfbca61aSMark Rutland * We need a zero page that is zero before & after resume in order to 454dfbca61aSMark Rutland * to break before make on the ttbr1 page tables. 
455dfbca61aSMark Rutland */ 456dfbca61aSMark Rutland zero_page = (void *)get_safe_page(GFP_ATOMIC); 457dfbca61aSMark Rutland if (!zero_page) { 458117f5727SMark Rutland pr_err("Failed to allocate zero page.\n"); 459a89d7ff9SPavel Tatashin return -ENOMEM; 460dfbca61aSMark Rutland } 461dfbca61aSMark Rutland 462*788bfdd9SPasha Tatashin if (el2_reset_needed()) { 463*788bfdd9SPasha Tatashin rc = trans_pgd_copy_el2_vectors(&trans_info, &el2_vectors); 464*788bfdd9SPasha Tatashin if (rc) { 465*788bfdd9SPasha Tatashin pr_err("Failed to setup el2 vectors\n"); 466*788bfdd9SPasha Tatashin return rc; 467*788bfdd9SPasha Tatashin } 468*788bfdd9SPasha Tatashin } 469*788bfdd9SPasha Tatashin 47082869ac5SJames Morse exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start; 47182869ac5SJames Morse /* 47282869ac5SJames Morse * Copy swsusp_arch_suspend_exit() to a safe page. This will generate 47382869ac5SJames Morse * a new set of ttbr0 page tables and load them. 47482869ac5SJames Morse */ 47582869ac5SJames Morse rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size, 4767018d467SJames Morse (phys_addr_t *)&hibernate_exit); 47782869ac5SJames Morse if (rc) { 478117f5727SMark Rutland pr_err("Failed to create safe executable page for hibernate_exit code.\n"); 479a89d7ff9SPavel Tatashin return rc; 48082869ac5SJames Morse } 48182869ac5SJames Morse 48282869ac5SJames Morse /* 48382869ac5SJames Morse * KASLR will cause the el2 vectors to be in a different location in 48482869ac5SJames Morse * the resumed kernel. Load hibernate's temporary copy into el2. 48582869ac5SJames Morse * 48682869ac5SJames Morse * We can skip this step if we booted at EL1, or are running with VHE. 
48782869ac5SJames Morse */ 488*788bfdd9SPasha Tatashin if (el2_reset_needed()) 48982869ac5SJames Morse __hyp_set_vectors(el2_vectors); 49082869ac5SJames Morse 49182869ac5SJames Morse hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1, 4922077be67SLaura Abbott resume_hdr.reenter_kernel, restore_pblist, 49382869ac5SJames Morse resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page)); 49482869ac5SJames Morse 495a89d7ff9SPavel Tatashin return 0; 49682869ac5SJames Morse } 4971fe492ceSJames Morse 4988ec058fdSJames Morse int hibernate_resume_nonboot_cpu_disable(void) 4998ec058fdSJames Morse { 5008ec058fdSJames Morse if (sleep_cpu < 0) { 5019165dabbSMasanari Iida pr_err("Failing to resume from hibernate on an unknown CPU.\n"); 5028ec058fdSJames Morse return -ENODEV; 5038ec058fdSJames Morse } 5048ec058fdSJames Morse 5058ec058fdSJames Morse return freeze_secondary_cpus(sleep_cpu); 5068ec058fdSJames Morse } 507