1caab277bSThomas Gleixner/* SPDX-License-Identifier: GPL-2.0-only */
282869ac5SJames Morse/*
382869ac5SJames Morse * Hibernate low-level support
482869ac5SJames Morse *
582869ac5SJames Morse * Copyright (C) 2016 ARM Ltd.
682869ac5SJames Morse * Author:	James Morse <james.morse@arm.com>
782869ac5SJames Morse */
882869ac5SJames Morse#include <linux/linkage.h>
982869ac5SJames Morse#include <linux/errno.h>
1082869ac5SJames Morse
1182869ac5SJames Morse#include <asm/asm-offsets.h>
1282869ac5SJames Morse#include <asm/assembler.h>
1382869ac5SJames Morse#include <asm/cputype.h>
1482869ac5SJames Morse#include <asm/memory.h>
1582869ac5SJames Morse#include <asm/page.h>
1682869ac5SJames Morse#include <asm/virt.h>
1782869ac5SJames Morse
1882869ac5SJames Morse/*
1982869ac5SJames Morse * Resume from hibernate
2082869ac5SJames Morse *
2182869ac5SJames Morse * Loads temporary page tables then restores the memory image.
2282869ac5SJames Morse * Finally branches to cpu_resume() to restore the state saved by
2382869ac5SJames Morse * swsusp_arch_suspend().
2482869ac5SJames Morse *
2582869ac5SJames Morse * Because this code has to be copied to a 'safe' page, it can't call out to
2682869ac5SJames Morse * other functions by PC-relative address. Also remember that it may be
2782869ac5SJames Morse * mid-way through over-writing other functions. For this reason it contains
28*fade9c2cSFuad Tabba * code from caches_clean_inval_pou() and uses the copy_page() macro.
2982869ac5SJames Morse *
3082869ac5SJames Morse * This 'safe' page is mapped via ttbr0, and executed from there. This function
3182869ac5SJames Morse * switches to a copy of the linear map in ttbr1, performs the restore, then
3282869ac5SJames Morse * switches ttbr1 to the original kernel's swapper_pg_dir.
3382869ac5SJames Morse *
3482869ac5SJames Morse * All of memory gets written to, including code. We need to clean the kernel
3582869ac5SJames Morse * text to the Point of Coherence (PoC) before secondary cores can be booted.
3682869ac5SJames Morse * Because the kernel modules and executable pages mapped to user space are
3782869ac5SJames Morse * also written as data, we clean all pages we touch to the Point of
3882869ac5SJames Morse * Unification (PoU).
3982869ac5SJames Morse *
4082869ac5SJames Morse * x0: physical address of temporary page tables
4182869ac5SJames Morse * x1: physical address of swapper page tables
4282869ac5SJames Morse * x2: address of cpu_resume
4382869ac5SJames Morse * x3: linear map address of restore_pblist in the current kernel
4482869ac5SJames Morse * x4: physical address of __hyp_stub_vectors, or 0
 * x5: physical address of a zero page that remains zero after resume
4682869ac5SJames Morse */
.pushsection    ".hibernate_exit.text", "ax"
SYM_CODE_START(swsusp_arch_suspend_exit)
	/*
	 * We execute from ttbr0, change ttbr1 to our copied linear map tables
	 * with a break-before-make via the zero page
	 */
	break_before_make_ttbr_switch	x5, x0, x6, x8

	/*
	 * Stash the remaining arguments in callee-preserved registers:
	 * the copy_page macro and the cache-clean loop below use x0-x10
	 * as scratch.
	 */
	mov	x21, x1			/* x21 = phys addr of swapper page tables */
	mov	x30, x2			/* LR = cpu_resume; the final ret branches there */
	mov	x24, x4			/* x24 = phys addr of __hyp_stub_vectors, or 0 */
	mov	x25, x5			/* x25 = phys addr of the zero page (for 2nd BBM) */

	/* walk the restore_pblist and use copy_page() to over-write memory */
	mov	x19, x3			/* x19 = current pbe in the linked list */

1:	ldr	x10, [x19, #HIBERN_PBE_ORIG]	/* x10 = page's original address */
	mov	x0, x10				/* copy destination */
	ldr	x1, [x19, #HIBERN_PBE_ADDR]	/* copy source: the saved copy */

	copy_page	x0, x1, x2, x3, x4, x5, x6, x7, x8, x9

	add	x1, x10, #PAGE_SIZE	/* x1 = end of the just-restored page */
	/* Clean the copied page to PoU - based on caches_clean_inval_pou() */
	raw_dcache_line_size x2, x3	/* x2 = D-cache line size in bytes */
	sub	x3, x2, #1		/* x3 = cache-line offset mask */
	bic	x4, x10, x3		/* x4 = page start, aligned down to a line */
2:	/* clean D line / unified line */
alternative_insn "dc cvau, x4",  "dc civac, x4",  ARM64_WORKAROUND_CLEAN_CACHE
	add	x4, x4, x2		/* advance one cache line */
	cmp	x4, x1
	b.lo	2b			/* loop until the whole page is cleaned */

	ldr	x19, [x19, #HIBERN_PBE_NEXT]	/* next pbe; NULL terminates */
	cbnz	x19, 1b
	dsb	ish		/* wait for PoU cleaning to finish */

	/* switch to the restored kernel's page tables */
	break_before_make_ttbr_switch	x25, x21, x6, x8

	/*
	 * All of memory (including code) was rewritten above: discard any
	 * stale instructions from every I-cache in the inner-shareable
	 * domain before executing restored text.
	 */
	ic	ialluis
	dsb	ish			/* complete the invalidate... */
	isb				/* ...and resync this PE's pipeline */

	cbz	x24, 3f		/* Do we need to re-initialise EL2? */
	hvc	#0		/* call the hyp-stub (x24 != 0) to reset EL2 */
3:	ret			/* tail-call cpu_resume via x30, set above */
SYM_CODE_END(swsusp_arch_suspend_exit)
.popsection
96