/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Hibernation support for x86-64
 *
 * Copyright 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright 2005 Andi Kleen <ak@suse.de>
 * Copyright 2004 Pavel Machek <pavel@suse.cz>
 *
 * swsusp_arch_resume must not use any stack or any nonlocal variables while
 * copying pages:
 *
 *	It's rewriting one kernel image with another. What is the stack in the
 *	"old" image could very well be a data page in the "new" image, and
 *	overwriting your own stack under you is a bad idea.
 */

	.text
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/frame.h>

SYM_FUNC_START(swsusp_arch_suspend)
	/* save the general-purpose registers into saved_context */
	movq	$saved_context, %rax
	movq	%rsp, pt_regs_sp(%rax)
	movq	%rbp, pt_regs_bp(%rax)
	movq	%rsi, pt_regs_si(%rax)
	movq	%rdi, pt_regs_di(%rax)
	movq	%rbx, pt_regs_bx(%rax)
	movq	%rcx, pt_regs_cx(%rax)
	movq	%rdx, pt_regs_dx(%rax)
	movq	%r8, pt_regs_r8(%rax)
	movq	%r9, pt_regs_r9(%rax)
	movq	%r10, pt_regs_r10(%rax)
	movq	%r11, pt_regs_r11(%rax)
	movq	%r12, pt_regs_r12(%rax)
	movq	%r13, pt_regs_r13(%rax)
	movq	%r14, pt_regs_r14(%rax)
	movq	%r15, pt_regs_r15(%rax)
	/* save the flags */
	pushfq
	popq	pt_regs_flags(%rax)

	/* save cr3 */
	movq	%cr3, %rax
	movq	%rax, restore_cr3(%rip)

	FRAME_BEGIN
	call swsusp_save
	FRAME_END
	ret
SYM_FUNC_END(swsusp_arch_suspend)

SYM_CODE_START(restore_image)
	/* prepare to jump to the image kernel */
	movq	restore_jump_address(%rip), %r8
	movq	restore_cr3(%rip), %r9

	/* prepare to switch to temporary page tables */
	movq	temp_pgt(%rip), %rax
	movq	mmu_cr4_features(%rip), %rbx

	/* prepare to copy image data to their original locations */
	movq	restore_pblist(%rip), %rdx

	/* jump to relocated restore code */
	movq	relocated_restore_code(%rip), %rcx
	jmpq	*%rcx
SYM_CODE_END(restore_image)

	/* code below has been relocated to a safe page */
SYM_CODE_START(core_restore_code)
	/* switch to temporary page tables */
	movq	%rax, %cr3
	/* flush TLB */
	movq	%rbx, %rcx
	andq	$~(X86_CR4_PGE), %rcx
	movq	%rcx, %cr4;  # turn off PGE
	movq	%cr3, %rcx;  # flush TLB
	movq	%rcx, %cr3;
	movq	%rbx, %cr4;  # turn PGE back on
.Lloop:
	testq	%rdx, %rdx
	jz	.Ldone

	/* get addresses from the pbe and copy the page */
	movq	pbe_address(%rdx), %rsi
	movq	pbe_orig_address(%rdx), %rdi
	movq	$(PAGE_SIZE >> 3), %rcx
	rep
	movsq

	/* progress to the next pbe */
	movq	pbe_next(%rdx), %rdx
	jmp	.Lloop

.Ldone:
	/* jump to the restore_registers address from the image header */
	jmpq	*%r8
SYM_CODE_END(core_restore_code)
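
/*
 * For reference, a sketch of the page backup entry that the copy loop in
 * core_restore_code walks. The pbe_address, pbe_orig_address and pbe_next
 * offsets used above are expected to be generated by asm-offsets from a
 * structure along these lines (struct pbe in include/linux/suspend.h):
 *
 *	struct pbe {
 *		void *address;		// address of the copy of the page
 *		void *orig_address;	// original address of the page
 *		struct pbe *next;	// next entry; NULL terminates the list
 *	};
 */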

	/* code below belongs to the image kernel */
	.align PAGE_SIZE
SYM_FUNC_START(restore_registers)
	/* go back to the original page tables */
	movq	%r9, %cr3

	/* Flush TLB, including "global" things (vmalloc) */
	movq	mmu_cr4_features(%rip), %rax
	movq	%rax, %rdx
	andq	$~(X86_CR4_PGE), %rdx
	movq	%rdx, %cr4;  # turn off PGE
	movq	%cr3, %rcx;  # flush TLB
	movq	%rcx, %cr3
	movq	%rax, %cr4;  # turn PGE back on

	/* We don't restore %rax, it must be 0 anyway */
	movq	$saved_context, %rax
	movq	pt_regs_sp(%rax), %rsp
	movq	pt_regs_bp(%rax), %rbp
	movq	pt_regs_si(%rax), %rsi
	movq	pt_regs_di(%rax), %rdi
	movq	pt_regs_bx(%rax), %rbx
	movq	pt_regs_cx(%rax), %rcx
	movq	pt_regs_dx(%rax), %rdx
	movq	pt_regs_r8(%rax), %r8
	movq	pt_regs_r9(%rax), %r9
	movq	pt_regs_r10(%rax), %r10
	movq	pt_regs_r11(%rax), %r11
	movq	pt_regs_r12(%rax), %r12
	movq	pt_regs_r13(%rax), %r13
	movq	pt_regs_r14(%rax), %r14
	movq	pt_regs_r15(%rax), %r15
	pushq	pt_regs_flags(%rax)
	popfq

	/* Saved in save_processor_state. */
	lgdt	saved_context_gdt_desc(%rax)

	xorl	%eax, %eax

	/* tell the hibernation core that we've just restored the memory */
	movq	%rax, in_suspend(%rip)

	ret
SYM_FUNC_END(restore_registers)
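
/*
 * Summary of the register contract across the resume path, as set up by
 * restore_image() above (readable from the code; kept here for reference):
 *
 *	%r8  - restore_jump_address, the restore_registers entry of the image
 *	       kernel, taken from the image header
 *	%r9  - restore_cr3, the image kernel's page tables
 *	%rax - temp_pgt, temporary page tables used while pages are copied
 *	%rbx - mmu_cr4_features, used to toggle CR4.PGE for the TLB flushes
 *	%rdx - restore_pblist, head of the list of pages to restore
 *	%rcx - relocated_restore_code, the safe copy of core_restore_code
 *
 * The C side is expected to populate the corresponding variables before
 * jumping to restore_image().
 */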