1af873fceSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 282869ac5SJames Morse /*: 382869ac5SJames Morse * Hibernate support specific for ARM64 482869ac5SJames Morse * 582869ac5SJames Morse * Derived from work on ARM hibernation support by: 682869ac5SJames Morse * 782869ac5SJames Morse * Ubuntu project, hibernation support for mach-dove 882869ac5SJames Morse * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu) 982869ac5SJames Morse * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.) 1082869ac5SJames Morse * https://lkml.org/lkml/2010/6/18/4 1182869ac5SJames Morse * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html 1282869ac5SJames Morse * https://patchwork.kernel.org/patch/96442/ 1382869ac5SJames Morse * 1482869ac5SJames Morse * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> 1582869ac5SJames Morse */ 1682869ac5SJames Morse #define pr_fmt(x) "hibernate: " x 178ec058fdSJames Morse #include <linux/cpu.h> 1882869ac5SJames Morse #include <linux/kvm_host.h> 1982869ac5SJames Morse #include <linux/pm.h> 2082869ac5SJames Morse #include <linux/sched.h> 2182869ac5SJames Morse #include <linux/suspend.h> 2282869ac5SJames Morse #include <linux/utsname.h> 2382869ac5SJames Morse 2482869ac5SJames Morse #include <asm/barrier.h> 2582869ac5SJames Morse #include <asm/cacheflush.h> 268ec058fdSJames Morse #include <asm/cputype.h> 270fbeb318SJames Morse #include <asm/daifflags.h> 2882869ac5SJames Morse #include <asm/irqflags.h> 29254a41c0SAKASHI Takahiro #include <asm/kexec.h> 3082869ac5SJames Morse #include <asm/memory.h> 3182869ac5SJames Morse #include <asm/mmu_context.h> 32ee11f332SSteven Price #include <asm/mte.h> 3382869ac5SJames Morse #include <asm/sections.h> 34d74b4e4fSJames Morse #include <asm/smp.h> 358ec058fdSJames Morse #include <asm/smp_plat.h> 3682869ac5SJames Morse #include <asm/suspend.h> 370194e760SMark Rutland #include <asm/sysreg.h> 38072e3d96SPavel Tatashin #include <asm/trans_pgd.h> 3982869ac5SJames Morse 
#include <asm/virt.h>

/*
 * Hibernate core relies on this value being 0 on resume, and marks it
 * __nosavedata assuming it will keep the resume kernel's '0' value. This
 * doesn't happen with KASLR.
 *
 * defined as "__visible int in_suspend __nosavedata" in
 * kernel/power/hibernate.c
 */
extern int in_suspend;

/* Do we need to reset el2? Only when we booted at EL2 without VHE (nVHE). */
#define el2_reset_needed() (is_hyp_nvhe())

/* temporary el2 vectors in the __hibernate_exit_text section. */
extern char hibernate_el2_vectors[];

/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
extern char __hyp_stub_vectors[];

/*
 * The logical cpu number we should resume on, initialised to a non-cpu
 * number.
 */
static int sleep_cpu = -EINVAL;

/*
 * Values that may not change over hibernate/resume. We put the build number
 * and date in here so that we guarantee not to resume with a different
 * kernel.
 */
struct arch_hibernate_hdr_invariants {
	char	uts_version[__NEW_UTS_LEN + 1];
};

/* These values need to be known across a hibernate/restore. */
static struct arch_hibernate_hdr {
	struct arch_hibernate_hdr_invariants invariants;

	/* These are needed to find the relocated kernel if built with kaslr */
	phys_addr_t	ttbr1_el1;
	void		(*reenter_kernel)(void);

	/*
	 * We need to know where the __hyp_stub_vectors are after restore to
	 * re-configure el2.
	 */
	phys_addr_t	__hyp_stub_vectors;

	/* MPIDR of the CPU we called cpu_suspend() on; resume happens there. */
	u64		sleep_cpu_mpidr;
} resume_hdr;

/* Capture the values that must match between the suspend and resume kernels. */
static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
	memset(i, 0, sizeof(*i));
	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}

/*
 * Tell the hibernate core which pfns must not be saved in the image:
 * the __nosave section and the crash-dump kernel's reserved memory.
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
	unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);

	return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
		crash_is_nosave(pfn);
}

void notrace save_processor_state(void)
{
	/* Hibernate must run with only the boot CPU online. */
	WARN_ON(num_online_cpus() != 1);
}

void notrace restore_processor_state(void)
{
	/* Nothing to do: state is restored by the cpu_suspend() machinery. */
}

/*
 * Fill in the arch-specific header stored in the hibernate image.
 *
 * Records the ttbr1 page tables and re-entry point used to find the resumed
 * kernel, the physical address of the hyp-stub vectors (when EL2 must be
 * reset) and the MPIDR of the CPU we suspended on.
 *
 * Returns 0 on success, -EOVERFLOW if the header does not fit in @max_size,
 * or -ENODEV if the sleeping CPU is unknown.
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct arch_hibernate_hdr *hdr = addr;

	if (max_size < sizeof(*hdr))
		return -EOVERFLOW;

	arch_hdr_invariants(&hdr->invariants);
	hdr->ttbr1_el1		= __pa_symbol(swapper_pg_dir);
	hdr->reenter_kernel	= _cpu_resume;

	/* We can't use __hyp_get_vectors() because kvm may still be loaded */
	if (el2_reset_needed())
		hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
	else
		hdr->__hyp_stub_vectors = 0;

	/* Save the mpidr of the cpu we called cpu_suspend() on... */
	if (sleep_cpu < 0) {
		pr_err("Failing to hibernate on an unknown CPU.\n");
		return -ENODEV;
	}
	hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
	pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_save);

/*
 * Validate and unpack the arch-specific header from a hibernate image.
 *
 * Rejects images generated by a different kernel build, maps the saved
 * MPIDR back to a logical CPU number, and brings that CPU up so resume
 * continues on the CPU that originally suspended.
 *
 * Returns 0 on success or a negative errno on mismatch/failure.
 */
int arch_hibernation_header_restore(void *addr)
{
	int ret;
	struct arch_hibernate_hdr_invariants invariants;
	struct arch_hibernate_hdr *hdr = addr;

	arch_hdr_invariants(&invariants);
	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
		pr_crit("Hibernate image not generated by this kernel!\n");
		return -EINVAL;
	}

	sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr);
	pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);
	if (sleep_cpu < 0) {
		pr_crit("Hibernated on a CPU not known to this kernel!\n");
		sleep_cpu = -EINVAL;
		return -EINVAL;
	}

	ret = bringup_hibernate_cpu(sleep_cpu);
	if (ret) {
		sleep_cpu = -EINVAL;
		return ret;
	}

	resume_hdr = *hdr;

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_restore);

/*
 * trans_pgd page allocator callback: allocate a page the hibernate core
 * guarantees won't be overwritten while restoring the image. @arg smuggles
 * the gfp flags through trans_pgd_info's void * argument.
 */
static void *hibernate_page_alloc(void *arg)
{
	return (void *)get_safe_page((__force gfp_t)(unsigned long)arg);
}
/*
 * Copies length bytes, starting at src_start into a new page,
 * performs cache maintenance, then maps it at the bottom of the address
 * space as executable.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text. This function generates a new set of page
 * tables, which it loads into ttbr0.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K
 * page system.
 */
static int create_safe_exec_page(void *src_start, size_t length,
				 phys_addr_t *phys_dst_addr)
{
	struct trans_pgd_info trans_info = {
		.trans_alloc_page	= hibernate_page_alloc,
		.trans_alloc_arg	= (__force void *)GFP_ATOMIC,
	};

	void *page = (void *)get_safe_page(GFP_ATOMIC);
	phys_addr_t trans_ttbr0;
	unsigned long t0sz;
	int rc;

	if (!page)
		return -ENOMEM;

	memcpy(page, src_start, length);
	/* Clean/invalidate to PoU so instruction fetches see the copy. */
	caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length);
	rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
	if (rc)
		return rc;

	/*
	 * Load our new page tables. A strict BBM approach requires that we
	 * ensure that TLBs are free of any entries that may overlap with the
	 * global mappings we are about to install.
	 *
	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
	 * runtime services), while for a userspace-driven test_resume cycle it
	 * points to userspace page tables (and we must point it at a zero page
	 * ourselves).
	 *
	 * We change T0SZ as part of installing the idmap. This is undone by
	 * cpu_uninstall_idmap() in __cpu_suspend_exit().
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	__cpu_set_tcr_t0sz(t0sz);
	write_sysreg(trans_ttbr0, ttbr0_el1);
	isb();

	*phys_dst_addr = virt_to_phys(page);

	return 0;
}

#ifdef CONFIG_ARM64_MTE

/* pfn -> saved tag storage, for every tagged page in the hibernate image. */
static DEFINE_XARRAY(mte_pages);

/*
 * Save the MTE tags of @page (at @pfn) into freshly allocated tag storage
 * and record it in mte_pages. Returns 0 on success or a negative errno;
 * the storage is freed again on xa_store() failure.
 */
static int save_tags(struct page *page, unsigned long pfn)
{
	void *tag_storage, *ret;

	tag_storage = mte_allocate_tag_storage();
	if (!tag_storage)
		return -ENOMEM;

	mte_save_page_tags(page_address(page), tag_storage);

	ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL);
	if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
		mte_free_tag_storage(tag_storage);
		return xa_err(ret);
	} else if (WARN(ret, "swsusp: %s: Duplicate entry", __func__)) {
		/* xa_store() displaced an old entry; free the stale storage. */
		mte_free_tag_storage(ret);
	}

	return 0;
}

/* Release all saved tag storage, e.g. when saving failed part-way. */
static void swsusp_mte_free_storage(void)
{
	XA_STATE(xa_state, &mte_pages, 0);
	void *tags;

	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, ULONG_MAX) {
		mte_free_tag_storage(tags);
	}
	xa_unlock(&mte_pages);

	xa_destroy(&mte_pages);
}

/*
 * Walk every populated zone and save the tags of each PG_mte_tagged page;
 * the hibernate image stores only page data, so tags must be kept aside.
 * Returns 0 on success (or when MTE is unsupported), negative errno on
 * failure, in which case any storage already allocated is freed.
 */
static int swsusp_mte_save_tags(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	int ret = 0;
	int n = 0;

	if (!system_supports_mte())
		return 0;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			if (!test_bit(PG_mte_tagged, &page->flags))
				continue;

			ret = save_tags(page, pfn);
			if (ret) {
				swsusp_mte_free_storage();
				goto out;
			}

			n++;
		}
	}
	pr_info("Saved %d MTE pages\n", n);

out:
	return ret;
}

/*
 * Write the tags saved by swsusp_mte_save_tags() back to their pages after
 * the memory image has been restored, then free the tag storage.
 */
static void swsusp_mte_restore_tags(void)
{
	XA_STATE(xa_state, &mte_pages, 0);
	int n = 0;
	void *tags;

	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, ULONG_MAX) {
		unsigned long pfn = xa_state.xa_index;
		/*
		 * NOTE(review): pfn_to_online_page() can return NULL and the
		 * result is dereferenced below without a check. Presumably a
		 * pfn saved by swsusp_mte_save_tags() is still online here --
		 * confirm against memory-hotplug expectations.
		 */
		struct page *page = pfn_to_online_page(pfn);

		/*
		 * It is not required to invoke page_kasan_tag_reset(page)
		 * at this point since the tags stored in page->flags are
		 * already restored.
		 */
		mte_restore_page_tags(page_address(page), tags);

		mte_free_tag_storage(tags);
		n++;
	}
	xa_unlock(&mte_pages);

	pr_info("Restored %d MTE pages\n", n);

	xa_destroy(&mte_pages);
}

#else	/* CONFIG_ARM64_MTE */

/* Stub: nothing to save without MTE support. */
static int swsusp_mte_save_tags(void)
{
	return 0;
}

/* Stub: nothing to restore without MTE support. */
static void swsusp_mte_restore_tags(void)
{
}

#endif	/* CONFIG_ARM64_MTE */

/*
 * Save the CPU state and build the hibernate image.
 *
 * From the hibernate core's view this runs "twice": once on suspend, where
 * __cpu_suspend_enter() returns non-zero and we call swsusp_save(); and once
 * when the restored image returns through the saved context, where
 * __cpu_suspend_enter() appears to return zero and we perform the cache
 * maintenance and fix-ups needed after memory has been overwritten.
 */
int swsusp_arch_suspend(void)
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;

	if (cpus_are_stuck_in_kernel()) {
		pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
		return -EBUSY;
	}

	flags = local_daif_save();

	if (__cpu_suspend_enter(&state)) {
		/* make the crash dump kernel image visible/saveable */
		crash_prepare_suspend();

		ret = swsusp_mte_save_tags();
		if (ret)
			/*
			 * NOTE(review): this early return skips the
			 * local_daif_restore() below -- verify that is
			 * intended on this failure path.
			 */
			return ret;

		sleep_cpu = smp_processor_id();
		ret = swsusp_save();
	} else {
		/* Clean kernel core startup/idle code to PoC*/
		dcache_clean_inval_poc((unsigned long)__mmuoff_data_start,
				    (unsigned long)__mmuoff_data_end);
		dcache_clean_inval_poc((unsigned long)__idmap_text_start,
				    (unsigned long)__idmap_text_end);

		/* Clean kvm setup code to PoC? */
		if (el2_reset_needed()) {
			dcache_clean_inval_poc(
				(unsigned long)__hyp_idmap_text_start,
				(unsigned long)__hyp_idmap_text_end);
			dcache_clean_inval_poc((unsigned long)__hyp_text_start,
					    (unsigned long)__hyp_text_end);
		}

		swsusp_mte_restore_tags();

		/* make the crash dump kernel image protected again */
		crash_post_resume();

		/*
		 * Tell the hibernation core that we've just restored
		 * the memory
		 */
		in_suspend = 0;

		sleep_cpu = -EINVAL;
		__cpu_suspend_exit();

		/*
		 * Just in case the boot kernel did turn the SSBD
		 * mitigation off behind our back, let's set the state
		 * to what we expect it to be.
		 */
		spectre_v4_enable_mitigation(NULL);
	}

	local_daif_restore(flags);

	return ret;
}
/*
 * Setup then Resume from the hibernate image using swsusp_arch_suspend_exit().
 *
 * Memory allocated by get_safe_page() will be dealt with by the hibernate code,
 * we don't need to free it here.
 */
int swsusp_arch_resume(void)
{
	int rc;
	void *zero_page;
	size_t exit_size;
	pgd_t *tmp_pg_dir;
	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
					  void *, phys_addr_t, phys_addr_t);
	struct trans_pgd_info trans_info = {
		.trans_alloc_page	= hibernate_page_alloc,
		.trans_alloc_arg	= (void *)GFP_ATOMIC,
	};

	/*
	 * Restoring the memory image will overwrite the ttbr1 page tables.
	 * Create a second copy of just the linear map, and use this when
	 * restoring.
	 */
	rc = trans_pgd_create_copy(&trans_info, &tmp_pg_dir, PAGE_OFFSET,
				   PAGE_END);
	if (rc)
		return rc;

	/*
	 * We need a zero page that is zero before & after resume in order
	 * to break before make on the ttbr1 page tables.
	 */
	zero_page = (void *)get_safe_page(GFP_ATOMIC);
	if (!zero_page) {
		pr_err("Failed to allocate zero page.\n");
		return -ENOMEM;
	}

	exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
	/*
	 * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
	 * a new set of ttbr0 page tables and load them.
	 */
	rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
				   (phys_addr_t *)&hibernate_exit);
	if (rc) {
		pr_err("Failed to create safe executable page for hibernate_exit code.\n");
		return rc;
	}

	/*
	 * The hibernate exit text contains a set of el2 vectors, that will
	 * be executed at el2 with the mmu off in order to reload hyp-stub.
	 */
	dcache_clean_inval_poc((unsigned long)hibernate_exit,
			    (unsigned long)hibernate_exit + exit_size);

	/*
	 * KASLR will cause the el2 vectors to be in a different location in
	 * the resumed kernel. Load hibernate's temporary copy into el2.
	 *
	 * We can skip this step if we booted at EL1, or are running with VHE.
	 */
	if (el2_reset_needed()) {
		phys_addr_t el2_vectors = (phys_addr_t)hibernate_exit;
		el2_vectors += hibernate_el2_vectors -
			       __hibernate_exit_text_start;	/* offset */

		__hyp_set_vectors(el2_vectors);
	}

	/* Does not return: control passes to the image kernel's saved context. */
	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
		       resume_hdr.reenter_kernel, restore_pblist,
		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));

	return 0;
}

/*
 * Resume must run on the CPU we hibernated on; keep only that CPU online
 * while the image is being restored.
 */
int hibernate_resume_nonboot_cpu_disable(void)
{
	if (sleep_cpu < 0) {
		pr_err("Failing to resume from hibernate on an unknown CPU.\n");
		return -ENODEV;
	}

	return freeze_secondary_cpus(sleep_cpu);
}