1af873fceSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 282869ac5SJames Morse /*: 382869ac5SJames Morse * Hibernate support specific for ARM64 482869ac5SJames Morse * 582869ac5SJames Morse * Derived from work on ARM hibernation support by: 682869ac5SJames Morse * 782869ac5SJames Morse * Ubuntu project, hibernation support for mach-dove 882869ac5SJames Morse * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu) 982869ac5SJames Morse * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.) 1082869ac5SJames Morse * https://lkml.org/lkml/2010/6/18/4 1182869ac5SJames Morse * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html 1282869ac5SJames Morse * https://patchwork.kernel.org/patch/96442/ 1382869ac5SJames Morse * 1482869ac5SJames Morse * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> 1582869ac5SJames Morse */ 1682869ac5SJames Morse #define pr_fmt(x) "hibernate: " x 178ec058fdSJames Morse #include <linux/cpu.h> 1882869ac5SJames Morse #include <linux/kvm_host.h> 1982869ac5SJames Morse #include <linux/pm.h> 2082869ac5SJames Morse #include <linux/sched.h> 2182869ac5SJames Morse #include <linux/suspend.h> 2282869ac5SJames Morse #include <linux/utsname.h> 2382869ac5SJames Morse 2482869ac5SJames Morse #include <asm/barrier.h> 2582869ac5SJames Morse #include <asm/cacheflush.h> 268ec058fdSJames Morse #include <asm/cputype.h> 270fbeb318SJames Morse #include <asm/daifflags.h> 2882869ac5SJames Morse #include <asm/irqflags.h> 29254a41c0SAKASHI Takahiro #include <asm/kexec.h> 3082869ac5SJames Morse #include <asm/memory.h> 3182869ac5SJames Morse #include <asm/mmu_context.h> 32ee11f332SSteven Price #include <asm/mte.h> 3382869ac5SJames Morse #include <asm/sections.h> 34d74b4e4fSJames Morse #include <asm/smp.h> 358ec058fdSJames Morse #include <asm/smp_plat.h> 3682869ac5SJames Morse #include <asm/suspend.h> 370194e760SMark Rutland #include <asm/sysreg.h> 38072e3d96SPavel Tatashin #include <asm/trans_pgd.h> 3982869ac5SJames Morse 
#include <asm/virt.h>

/*
 * Hibernate core relies on this value being 0 on resume, and marks it
 * __nosavedata assuming it will keep the resume kernel's '0' value. This
 * doesn't happen with either KASLR.
 *
 * defined as "__visible int in_suspend __nosavedata" in
 * kernel/power/hibernate.c
 */
extern int in_suspend;

/* Do we need to reset el2? */
#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())

/* temporary el2 vectors in the __hibernate_exit_text section. */
extern char hibernate_el2_vectors[];

/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
extern char __hyp_stub_vectors[];

/*
 * The logical cpu number we should resume on, initialised to a non-cpu
 * number.
 */
static int sleep_cpu = -EINVAL;

/*
 * Values that may not change over hibernate/resume. We put the build number
 * and date in here so that we guarantee not to resume with a different
 * kernel.
 */
struct arch_hibernate_hdr_invariants {
	char uts_version[__NEW_UTS_LEN + 1];
};

/* These values need to be known across a hibernate/restore. */
static struct arch_hibernate_hdr {
	struct arch_hibernate_hdr_invariants invariants;

	/* These are needed to find the relocated kernel if built with kaslr */
	phys_addr_t ttbr1_el1;
	void (*reenter_kernel)(void);

	/*
	 * We need to know where the __hyp_stub_vectors are after restore to
	 * re-configure el2.
	 */
	phys_addr_t __hyp_stub_vectors;

	u64 sleep_cpu_mpidr;
} resume_hdr;

/* Fill @i with the values that must match between the image and this kernel. */
static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
	memset(i, 0, sizeof(*i));
	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}

int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
	unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);

	return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
		crash_is_nosave(pfn);
}

void notrace save_processor_state(void)
{
	WARN_ON(num_online_cpus() != 1);
}

void notrace restore_processor_state(void)
{
}

/*
 * Write the arch-specific hibernate header: the invariants that must match
 * on resume, the physical addresses needed to re-enter the image kernel,
 * and the MPIDR of the CPU we are hibernating on.
 *
 * Returns 0 on success, -EOVERFLOW if @max_size is too small, -ENODEV if
 * the sleeping CPU is unknown.
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct arch_hibernate_hdr *hdr = addr;

	if (max_size < sizeof(*hdr))
		return -EOVERFLOW;

	arch_hdr_invariants(&hdr->invariants);
	hdr->ttbr1_el1 = __pa_symbol(swapper_pg_dir);
	hdr->reenter_kernel = _cpu_resume;

	/* We can't use __hyp_get_vectors() because kvm may still be loaded */
	if (el2_reset_needed())
		hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
	else
		hdr->__hyp_stub_vectors = 0;

	/* Save the mpidr of the cpu we called cpu_suspend() on... */
	if (sleep_cpu < 0) {
		pr_err("Failing to hibernate on an unknown CPU.\n");
		return -ENODEV;
	}
	hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
	pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_save);

/*
 * Validate the image header against this kernel and bring up the CPU the
 * image was created on, so resume happens on the same (logical) CPU.
 */
int arch_hibernation_header_restore(void *addr)
{
	int ret;
	struct arch_hibernate_hdr_invariants invariants;
	struct arch_hibernate_hdr *hdr = addr;

	arch_hdr_invariants(&invariants);
	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
		pr_crit("Hibernate image not generated by this kernel!\n");
		return -EINVAL;
	}

	sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr);
	pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);
	if (sleep_cpu < 0) {
		pr_crit("Hibernated on a CPU not known to this kernel!\n");
		sleep_cpu = -EINVAL;
		return -EINVAL;
	}

	ret = bringup_hibernate_cpu(sleep_cpu);
	if (ret) {
		sleep_cpu = -EINVAL;
		return ret;
	}

	resume_hdr = *hdr;

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_restore);

/* trans_pgd page allocator: @arg carries the gfp flags (cast through long). */
static void *hibernate_page_alloc(void *arg)
{
	return (void *)get_safe_page((gfp_t)(unsigned long)arg);
}

/*
 * Copies length bytes, starting at src_start into an new page,
 * perform cache maintenance, then maps it at the specified address low
 * address as executable.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text. This function generates a new set of page
 * tables, which it loads into ttbr0.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K
 * page system.
 */
static int create_safe_exec_page(void *src_start, size_t length,
				 unsigned long dst_addr,
				 phys_addr_t *phys_dst_addr)
{
	struct trans_pgd_info trans_info = {
		.trans_alloc_page	= hibernate_page_alloc,
		.trans_alloc_arg	= (void *)GFP_ATOMIC,
	};

	void *page = (void *)get_safe_page(GFP_ATOMIC);
	pgd_t *trans_pgd;
	int rc;

	if (!page)
		return -ENOMEM;

	memcpy(page, src_start, length);
	__flush_icache_range((unsigned long)page, (unsigned long)page + length);

	trans_pgd = (void *)get_safe_page(GFP_ATOMIC);
	if (!trans_pgd)
		return -ENOMEM;

	rc = trans_pgd_map_page(&trans_info, trans_pgd, page, dst_addr,
				PAGE_KERNEL_EXEC);
	if (rc)
		return rc;

	/*
	 * Load our new page tables. A strict BBM approach requires that we
	 * ensure that TLBs are free of any entries that may overlap with the
	 * global mappings we are about to install.
	 *
	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
	 * runtime services), while for a userspace-driven test_resume cycle it
	 * points to userspace page tables (and we must point it at a zero page
	 * ourselves). Elsewhere we only (un)install the idmap with preemption
	 * disabled, so T0SZ should be as required regardless.
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	write_sysreg(phys_to_ttbr(virt_to_phys(trans_pgd)), ttbr0_el1);
	isb();

	*phys_dst_addr = virt_to_phys(page);

	return 0;
}

#define dcache_clean_range(start, end)	__flush_dcache_area(start, (end - start))

#ifdef CONFIG_ARM64_MTE

/* pfn -> saved-tag-storage map, populated while the image is being written. */
static DEFINE_XARRAY(mte_pages);

/*
 * Save @page's MTE tags into freshly allocated storage and index it by @pfn
 * in mte_pages. Returns 0 on success or a negative errno.
 */
static int save_tags(struct page *page, unsigned long pfn)
{
	void *tag_storage, *ret;

	tag_storage = mte_allocate_tag_storage();
	if (!tag_storage)
		return -ENOMEM;

	mte_save_page_tags(page_address(page), tag_storage);

	ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL);
	if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
		mte_free_tag_storage(tag_storage);
		return xa_err(ret);
	} else if (WARN(ret, "swsusp: %s: Duplicate entry", __func__)) {
		/* xa_store() returned a displaced old entry: free it. */
		mte_free_tag_storage(ret);
	}

	return 0;
}

/* Drop every saved tag buffer and tear down the xarray (error path). */
static void swsusp_mte_free_storage(void)
{
	XA_STATE(xa_state, &mte_pages, 0);
	void *tags;

	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, ULONG_MAX) {
		mte_free_tag_storage(tags);
	}
	xa_unlock(&mte_pages);

	xa_destroy(&mte_pages);
}

/*
 * Walk every populated zone and stash the tags of each PG_mte_tagged page,
 * so they survive the hibernate image (which only saves data, not tags).
 */
static int swsusp_mte_save_tags(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	int ret = 0;
	int n = 0;

	if (!system_supports_mte())
		return 0;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			if (!test_bit(PG_mte_tagged, &page->flags))
				continue;

			ret = save_tags(page, pfn);
			if (ret) {
				swsusp_mte_free_storage();
				goto out;
			}

			n++;
		}
	}
	pr_info("Saved %d MTE pages\n", n);

out:
	return ret;
}

/* Restore the tags saved by swsusp_mte_save_tags() and free the storage. */
static void swsusp_mte_restore_tags(void)
{
	XA_STATE(xa_state, &mte_pages, 0);
	int n = 0;
	void *tags;

	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, ULONG_MAX) {
		unsigned long pfn = xa_state.xa_index;
		struct page *page = pfn_to_online_page(pfn);

		/*
		 * It is not required to invoke page_kasan_tag_reset(page)
		 * at this point since the tags stored in page->flags are
		 * already restored.
		 */
		mte_restore_page_tags(page_address(page), tags);

		mte_free_tag_storage(tags);
		n++;
	}
	xa_unlock(&mte_pages);

	pr_info("Restored %d MTE pages\n", n);

	xa_destroy(&mte_pages);
}

#else	/* CONFIG_ARM64_MTE */

static int swsusp_mte_save_tags(void)
{
	return 0;
}

static void swsusp_mte_restore_tags(void)
{
}

#endif	/* CONFIG_ARM64_MTE */

/*
 * Snapshot (or, on the resume path, finish restoring) the system.
 *
 * Runs with DAIF masked; both the save path and the "we just got restored"
 * path below execute under that mask and must restore it before returning.
 */
int swsusp_arch_suspend(void)
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;

	if (cpus_are_stuck_in_kernel()) {
		pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
		return -EBUSY;
	}

	flags = local_daif_save();

	if (__cpu_suspend_enter(&state)) {
		/* make the crash dump kernel image visible/saveable */
		crash_prepare_suspend();

		ret = swsusp_mte_save_tags();
		if (ret) {
			/*
			 * Fix: restore the DAIF flags saved above before
			 * bailing out; the previous early return left the
			 * hibernate core running with exceptions masked.
			 */
			local_daif_restore(flags);
			return ret;
		}

		sleep_cpu = smp_processor_id();
		ret = swsusp_save();
	} else {
		/* Clean kernel core startup/idle code to PoC */
		dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
		dcache_clean_range(__idmap_text_start, __idmap_text_end);

		/* Clean kvm setup code to PoC? */
		if (el2_reset_needed()) {
			dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
			dcache_clean_range(__hyp_text_start, __hyp_text_end);
		}

		swsusp_mte_restore_tags();

		/* make the crash dump kernel image protected again */
		crash_post_resume();

		/*
		 * Tell the hibernation core that we've just restored
		 * the memory
		 */
		in_suspend = 0;

		sleep_cpu = -EINVAL;
		__cpu_suspend_exit();

		/*
		 * Just in case the boot kernel did turn the SSBD
		 * mitigation off behind our back, let's set the state
		 * to what we expect it to be.
		 */
		spectre_v4_enable_mitigation(NULL);
	}

	local_daif_restore(flags);

	return ret;
}

/*
 * Setup then Resume from the hibernate image using swsusp_arch_suspend_exit().
 *
 * Memory allocated by get_safe_page() will be dealt with by the hibernate code,
 * we don't need to free it here.
 */
int swsusp_arch_resume(void)
{
	int rc;
	void *zero_page;
	size_t exit_size;
	pgd_t *tmp_pg_dir;
	phys_addr_t phys_hibernate_exit;
	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
					  void *, phys_addr_t, phys_addr_t);

	/*
	 * Restoring the memory image will overwrite the ttbr1 page tables.
	 * Create a second copy of just the linear map, and use this when
	 * restoring.
	 */
	rc = trans_pgd_create_copy(&tmp_pg_dir, PAGE_OFFSET, PAGE_END);
	if (rc)
		return rc;

	/*
	 * We need a zero page that is zero before & after resume in order
	 * to break before make on the ttbr1 page tables.
	 */
	zero_page = (void *)get_safe_page(GFP_ATOMIC);
	if (!zero_page) {
		pr_err("Failed to allocate zero page.\n");
		return -ENOMEM;
	}

	/*
	 * Locate the exit code in the bottom-but-one page, so that *NULL
	 * still has disastrous effects.
	 */
	hibernate_exit = (void *)PAGE_SIZE;
	exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
	/*
	 * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
	 * a new set of ttbr0 page tables and load them.
	 */
	rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
				   (unsigned long)hibernate_exit,
				   &phys_hibernate_exit);
	if (rc) {
		pr_err("Failed to create safe executable page for hibernate_exit code.\n");
		return rc;
	}

	/*
	 * The hibernate exit text contains a set of el2 vectors, that will
	 * be executed at el2 with the mmu off in order to reload hyp-stub.
	 */
	__flush_dcache_area(hibernate_exit, exit_size);

	/*
	 * KASLR will cause the el2 vectors to be in a different location in
	 * the resumed kernel. Load hibernate's temporary copy into el2.
	 *
	 * We can skip this step if we booted at EL1, or are running with VHE.
	 */
	if (el2_reset_needed()) {
		phys_addr_t el2_vectors = phys_hibernate_exit;	/* base */
		el2_vectors += hibernate_el2_vectors -
			       __hibernate_exit_text_start;	/* offset */

		__hyp_set_vectors(el2_vectors);
	}

	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
		       resume_hdr.reenter_kernel, restore_pblist,
		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));

	return 0;
}

/*
 * Called by the hibernate core when disabling nonboot CPUs for resume:
 * keep only the CPU the image was created on (sleep_cpu) online.
 */
int hibernate_resume_nonboot_cpu_disable(void)
{
	if (sleep_cpu < 0) {
		pr_err("Failing to resume from hibernate on an unknown CPU.\n");
		return -ENODEV;
	}

	return freeze_secondary_cpus(sleep_cpu);
}