Skip to content

Commit a584a90

Browse files
James Morse authored and Alex Shi committed
arm64: kernel: Add support for hibernate/suspend-to-disk
Add support for hibernate/suspend-to-disk.

Suspend borrows code from cpu_suspend() to write cpu state onto the stack,
before calling swsusp_save() to save the memory image.

Restore creates a set of temporary page tables, covering only the linear map,
copies the restore code to a 'safe' page, then uses the copy to restore the
memory image. The copied code executes in the lower half of the address space,
and once complete, restores the original kernel's page tables. It then calls
into cpu_resume(), and follows the normal cpu_suspend() path back into the
suspend code.

To restore a kernel using KASLR, the address of the page tables and
cpu_resume() are stored in the hibernate arch-header, and the el2 vectors are
pivoted via the 'safe' page in low memory.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Kevin Hilman <khilman@baylibre.com> # Tested on Juno R2
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
(cherry picked from commit 82869ac57b5d3b550446932c918dbf2caf020c9e)
Signed-off-by: Alex Shi <alex.shi@linaro.org>

Conflicts:
	arch/arm64/kernel/Makefile
1 parent 3169fc9 commit a584a90

7 files changed

Lines changed: 673 additions & 0 deletions

File tree

arch/arm64/Kconfig

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -814,6 +814,14 @@ menu "Power management options"
814814

815815
source "kernel/power/Kconfig"
816816

817+
# Hibernate/suspend-to-disk needs the cpu_suspend()/cpu_resume() machinery,
# which is provided by CPU power management.
config ARCH_HIBERNATION_POSSIBLE
	def_bool y
	depends on CPU_PM

# arm64 writes an architecture-specific header into the hibernate image
# (it records the resume entry point and page-table address, so a KASLR'd
# kernel can be restored).
config ARCH_HIBERNATION_HEADER
	def_bool y
	depends on HIBERNATION
824+
817825
config ARCH_SUSPEND_POSSIBLE
818826
def_bool y
819827

arch/arm64/include/asm/suspend.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,4 +40,11 @@ extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
4040
extern void cpu_resume(void);
4141
int __cpu_suspend_enter(struct sleep_stack_data *state);
4242
void __cpu_suspend_exit(void);
43+
/*
 * Low-level resume entry point.
 * NOTE(review): presumably the assembly counterpart of cpu_resume() —
 * definition is not in this file; confirm against the sleep/resume code.
 */
void _cpu_resume(void);

/* arm64 implementations of the hibernate core's suspend/resume hooks. */
int swsusp_arch_suspend(void);
int swsusp_arch_resume(void);

/*
 * Save/restore the arm64-specific hibernate image header (records the
 * page-table address and cpu_resume() entry, per this commit's description).
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size);
int arch_hibernation_header_restore(void *addr);
49+
4350
#endif

arch/arm64/kernel/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,7 @@ arm64-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o
4040
arm64-obj-$(CONFIG_PCI) += pci.o
4141
arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
4242
arm64-obj-$(CONFIG_ACPI) += acpi.o
43+
arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
4344

4445
obj-y += $(arm64-obj-y) vdso/ probes/
4546
obj-m += $(arm64-obj-m)

arch/arm64/kernel/asm-offsets.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
#include <linux/mm.h>
2323
#include <linux/dma-mapping.h>
2424
#include <linux/kvm_host.h>
25+
#include <linux/suspend.h>
2526
#include <asm/thread_info.h>
2627
#include <asm/memory.h>
2728
#include <asm/smp_plat.h>
@@ -137,5 +138,9 @@ int main(void)
137138
#endif
138139
DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0));
139140
DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
141+
  BLANK();
  /*
   * Offsets into struct pbe (hibernate page-backup entry), consumed by the
   * restore_pblist walk in hibernate-asm.S.
   */
  DEFINE(HIBERN_PBE_ORIG,	offsetof(struct pbe, orig_address));
  DEFINE(HIBERN_PBE_ADDR,	offsetof(struct pbe, address));
  DEFINE(HIBERN_PBE_NEXT,	offsetof(struct pbe, next));
140145
return 0;
141146
}

arch/arm64/kernel/hibernate-asm.S

Lines changed: 176 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,176 @@
1+
/*
2+
* Hibernate low-level support
3+
*
4+
* Copyright (C) 2016 ARM Ltd.
5+
* Author: James Morse <james.morse@arm.com>
6+
*
7+
* This program is free software; you can redistribute it and/or modify
8+
* it under the terms of the GNU General Public License version 2 as
9+
* published by the Free Software Foundation.
10+
*
11+
* This program is distributed in the hope that it will be useful,
12+
* but WITHOUT ANY WARRANTY; without even the implied warranty of
13+
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14+
* GNU General Public License for more details.
15+
*
16+
* You should have received a copy of the GNU General Public License
17+
* along with this program. If not, see <http://www.gnu.org/licenses/>.
18+
*/
19+
#include <linux/linkage.h>
20+
#include <linux/errno.h>
21+
22+
#include <asm/asm-offsets.h>
23+
#include <asm/assembler.h>
24+
#include <asm/cputype.h>
25+
#include <asm/memory.h>
26+
#include <asm/page.h>
27+
#include <asm/virt.h>
28+
29+
/*
 * To prevent the possibility of old and new partial table walks being visible
 * in the tlb, switch the ttbr to a zero page when we invalidate the old
 * records. D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i
 * Even switching to our copied tables will cause a changed output address at
 * each stage of the walk.
 *
 * \zero_page:  physical address of an empty (all-invalid) page table
 * \page_table: physical address of the tables to switch to
 */
.macro break_before_make_ttbr_switch zero_page, page_table
	msr	ttbr1_el1, \zero_page	/* park ttbr1 on the zero page ... */
	isb
	tlbi	vmalle1is		/* ... while stale EL1 entries are purged */
	dsb	ish
	msr	ttbr1_el1, \page_table	/* install the new tables */
	isb
.endm
44+
45+
46+
/*
 * Resume from hibernate
 *
 * Loads temporary page tables then restores the memory image.
 * Finally branches to cpu_resume() to restore the state saved by
 * swsusp_arch_suspend().
 *
 * Because this code has to be copied to a 'safe' page, it can't call out to
 * other functions by PC-relative address. Also remember that it may be
 * mid-way through over-writing other functions. For this reason it contains
 * code from flush_icache_range() and uses the copy_page() macro.
 *
 * This 'safe' page is mapped via ttbr0, and executed from there. This function
 * switches to a copy of the linear map in ttbr1, performs the restore, then
 * switches ttbr1 to the original kernel's swapper_pg_dir.
 *
 * All of memory gets written to, including code. We need to clean the kernel
 * text to the Point of Coherence (PoC) before secondary cores can be booted.
 * Because the kernel modules and executable pages mapped to user space are
 * also written as data, we clean all pages we touch to the Point of
 * Unification (PoU).
 *
 * x0: physical address of temporary page tables
 * x1: physical address of swapper page tables
 * x2: address of cpu_resume
 * x3: linear map address of restore_pblist in the current kernel
 * x4: physical address of __hyp_stub_vectors, or 0
 * x5: physical address of a zero page that remains zero after resume
 */
.pushsection	".hibernate_exit.text", "ax"
ENTRY(swsusp_arch_suspend_exit)
	/*
	 * We execute from ttbr0, change ttbr1 to our copied linear map tables
	 * with a break-before-make via the zero page
	 */
	break_before_make_ttbr_switch	x5, x0

	/*
	 * Stash the arguments in callee-ish registers the copy loop won't
	 * touch; x30 (lr) gets cpu_resume so the final 'ret' lands there.
	 */
	mov	x21, x1		/* swapper page tables */
	mov	x30, x2		/* cpu_resume */
	mov	x24, x4		/* __hyp_stub_vectors phys, or 0 */
	mov	x25, x5		/* zero page */

	/* walk the restore_pblist and use copy_page() to over-write memory */
	mov	x19, x3

1:	ldr	x10, [x19, #HIBERN_PBE_ORIG]
	mov	x0, x10		/* destination: the page's original address */
	ldr	x1, [x19, #HIBERN_PBE_ADDR]

	copy_page	x0, x1, x2, x3, x4, x5, x6, x7, x8, x9

	add	x1, x10, #PAGE_SIZE
	/* Clean the copied page to PoU - based on flush_icache_range() */
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x10, x3	/* align down to a cache-line boundary */
2:	dc	cvau, x4	/* clean D line / unified line */
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	2b

	ldr	x19, [x19, #HIBERN_PBE_NEXT]
	cbnz	x19, 1b		/* next pbe, until the list ends */
	dsb	ish		/* wait for PoU cleaning to finish */

	/* switch to the restored kernels page tables */
	break_before_make_ttbr_switch	x25, x21

	/* discard stale instructions now that the restored text is in place */
	ic	ialluis
	dsb	ish
	isb

	cbz	x24, 3f		/* Do we need to re-initialise EL2? */
	hvc	#0		/* handled by el1_sync below, which re-installs
				 * the hyp stub (assumes hibernate_el2_vectors
				 * are live at this point) */
3:	ret			/* to cpu_resume, via x30 set above */

	.ltorg
ENDPROC(swsusp_arch_suspend_exit)
124+
125+
/*
 * Restore the hyp stub.
 * This must be done before the hibernate page is unmapped by _cpu_resume(),
 * but happens before any of the hyp-stub's code is cleaned to PoC.
 *
 * x24: The physical address of __hyp_stub_vectors
 */
el1_sync:
	msr	vbar_el2, x24	/* point EL2 exceptions back at the hyp stub */
	eret
ENDPROC(el1_sync)
136+
137+
/*
 * Emit a spinning stub for a vector entry we never expect to take: if one
 * fires during restore there is nothing sane to do, so hang in place.
 */
.macro invalid_vector	label
\label:
	b \label
ENDPROC(\label)
.endm

	invalid_vector	el2_sync_invalid
	invalid_vector	el2_irq_invalid
	invalid_vector	el2_fiq_invalid
	invalid_vector	el2_error_invalid
	invalid_vector	el1_sync_invalid
	invalid_vector	el1_irq_invalid
	invalid_vector	el1_fiq_invalid
	invalid_vector	el1_error_invalid
151+
152+
/*
 * el2 vectors - switch el2 here while we restore the memory image.
 * Only the 64-bit EL1 synchronous entry (the hvc from
 * swsusp_arch_suspend_exit) is expected; every other slot spins.
 */
	.align 11
ENTRY(hibernate_el2_vectors)
	ventry	el2_sync_invalid		// Synchronous EL2t
	ventry	el2_irq_invalid			// IRQ EL2t
	ventry	el2_fiq_invalid			// FIQ EL2t
	ventry	el2_error_invalid		// Error EL2t

	ventry	el2_sync_invalid		// Synchronous EL2h
	ventry	el2_irq_invalid			// IRQ EL2h
	ventry	el2_fiq_invalid			// FIQ EL2h
	ventry	el2_error_invalid		// Error EL2h

	ventry	el1_sync			// Synchronous 64-bit EL1
	ventry	el1_irq_invalid			// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error_invalid		// Error 64-bit EL1

	ventry	el1_sync_invalid		// Synchronous 32-bit EL1
	ventry	el1_irq_invalid			// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error_invalid		// Error 32-bit EL1
END(hibernate_el2_vectors)

.popsection

0 commit comments

Comments
 (0)