 	orr	\dst, \dst, \mask		// dst|=(aff3>>rs3)
 	.endm
 /*
- * Save CPU state for a suspend and execute the suspend finisher.
- * On success it will return 0 through cpu_resume - ie through a CPU
- * soft/hard reboot from the reset vector.
- * On failure it returns the suspend finisher return value or force
- * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
- * is not allowed to return, if it does this must be considered failure).
- * It saves callee registers, and allocates space on the kernel stack
- * to save the CPU specific registers + some other data for resume.
+ * Save CPU state in the provided sleep_stack_data area, and publish its
+ * location for cpu_resume()'s use in sleep_save_stash.
  *
- * x0 = suspend finisher argument
- * x1 = suspend finisher function pointer
+ * cpu_resume() will restore this saved state, and return. Because the
+ * link-register is saved and restored, it will appear to return from this
+ * function. So that the caller can tell the suspend/resume paths apart,
+ * __cpu_suspend_enter() will always return a non-zero value, whereas the
+ * path through cpu_resume() will return 0.
+ *
+ * x0 = struct sleep_stack_data area
  */
 ENTRY(__cpu_suspend_enter)
-	stp	x29, lr, [sp, #-96]!
-	stp	x19, x20, [sp, #16]
-	stp	x21, x22, [sp, #32]
-	stp	x23, x24, [sp, #48]
-	stp	x25, x26, [sp, #64]
-	stp	x27, x28, [sp, #80]
-	/*
-	 * Stash suspend finisher and its argument in x20 and x19
-	 */
-	mov	x19, x0
-	mov	x20, x1
+	stp	x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS]
+	stp	x19, x20, [x0, #SLEEP_STACK_DATA_CALLEE_REGS+16]
+	stp	x21, x22, [x0, #SLEEP_STACK_DATA_CALLEE_REGS+32]
+	stp	x23, x24, [x0, #SLEEP_STACK_DATA_CALLEE_REGS+48]
+	stp	x25, x26, [x0, #SLEEP_STACK_DATA_CALLEE_REGS+64]
+	stp	x27, x28, [x0, #SLEEP_STACK_DATA_CALLEE_REGS+80]
+
+	/* save the sp in cpu_suspend_ctx */
 	mov	x2, sp
-	sub	sp, sp, #CPU_SUSPEND_SZ		// allocate cpu_suspend_ctx
-	mov	x0, sp
-	/*
-	 * x0 now points to struct cpu_suspend_ctx allocated on the stack
-	 */
-	str	x2, [x0, #CPU_CTX_SP]
+	str	x2, [x0, #SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP]
+
+	/* find the mpidr_hash */
 	ldr	x1, =sleep_save_sp
 	ldr	x1, [x1, #SLEEP_SAVE_SP_VIRT]
 	mrs	x7, mpidr_el1
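
For reference, the SLEEP_STACK_DATA_* offsets used above correspond to a save area along these lines. This is a sketch inferred from the stores in this hunk: struct cpu_suspend_ctx is the pre-existing system-register context (whose sp slot is CPU_CTX_SP), while the constant name NR_CALLEE_SAVED_REGS and the field order are assumptions, not quotes from the patch's header changes.

	#include <asm/suspend.h>	/* pre-existing struct cpu_suspend_ctx */

	#define NR_CALLEE_SAVED_REGS	12	/* x29, lr, x19..x28, as stored above */

	struct sleep_stack_data {
		struct cpu_suspend_ctx	system_regs;	/* SLEEP_STACK_DATA_SYSTEM_REGS */
		unsigned long		callee_saved_regs[NR_CALLEE_SAVED_REGS];
							/* SLEEP_STACK_DATA_CALLEE_REGS */
	};
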
@@ -93,34 +86,11 @@ ENTRY(__cpu_suspend_enter)
 	ldp	w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
 	compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
 	add	x1, x1, x8, lsl #3
+
+	stp	x29, lr, [sp, #-16]!
 	bl	__cpu_suspend_save
-	/*
-	 * Grab suspend finisher in x20 and its argument in x19
-	 */
-	mov	x0, x19
-	mov	x1, x20
-	/*
-	 * We are ready for power down, fire off the suspend finisher
-	 * in x1, with argument in x0
-	 */
-	blr	x1
-	/*
-	 * Never gets here, unless suspend finisher fails.
-	 * Successful cpu_suspend should return from cpu_resume, returning
-	 * through this code path is considered an error
-	 * If the return value is set to 0 force x0 = -EOPNOTSUPP
-	 * to make sure a proper error condition is propagated
-	 */
-	cmp	x0, #0
-	mov	x3, #-EOPNOTSUPP
-	csel	x0, x3, x0, eq
-	add	sp, sp, #CPU_SUSPEND_SZ		// rewind stack pointer
-	ldp	x19, x20, [sp, #16]
-	ldp	x21, x22, [sp, #32]
-	ldp	x23, x24, [sp, #48]
-	ldp	x25, x26, [sp, #64]
-	ldp	x27, x28, [sp, #80]
-	ldp	x29, lr, [sp], #96
+	ldp	x29, lr, [sp], #16
+	mov	x0, #1
 	ret
 ENDPROC(__cpu_suspend_enter)
 	.ltorg
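
The `mov x0, #1` above is the non-zero first-pass return value the new comment promises. A minimal sketch of a C caller, assuming the finisher plumbing now lives in C; the function and parameter names here are illustrative, not the kernel's actual cpu_suspend():

	#include <linux/errno.h>
	#include <asm/suspend.h>

	int __cpu_suspend_enter(struct sleep_stack_data *state);	/* this file */

	static int suspend_sketch(unsigned long arg, int (*finisher)(unsigned long))
	{
		struct sleep_stack_data state;

		if (__cpu_suspend_enter(&state)) {
			/* First return, non-zero: state is stashed, power down. */
			int ret = finisher(arg);

			/* The finisher must not return; returning 0 is still a
			 * failure, mirroring the -EOPNOTSUPP fixup deleted from
			 * the asm above. */
			return ret ? ret : -EOPNOTSUPP;
		}
		/* Second "return", through cpu_resume() after wakeup. */
		return 0;
	}
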
@@ -146,12 +116,6 @@ ENDPROC(cpu_resume_mmu)
 	.popsection
 cpu_resume_after_mmu:
 	mov	x0, #0			// return zero on success
-	ldp	x19, x20, [sp, #16]
-	ldp	x21, x22, [sp, #32]
-	ldp	x23, x24, [sp, #48]
-	ldp	x25, x26, [sp, #64]
-	ldp	x27, x28, [sp, #80]
-	ldp	x29, lr, [sp], #96
 	ret
 ENDPROC(cpu_resume_after_mmu)

@@ -168,6 +132,8 @@ ENTRY(cpu_resume)
 	/* x7 contains hash index, let's use it to grab context pointer */
 	ldr_l	x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
 	ldr	x0, [x0, x7, lsl #3]
+	add	x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS
+	add	x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
 	/* load sp from context */
 	ldr	x2, [x0, #CPU_CTX_SP]
 	/* load physical address of identity map page table in x1 */
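
In C terms, the two loads at the top of this hunk index a stash of physical pointers by the mpidr hash, and the two new `add`s then pick out the two halves of the record for their separate consumers. A sketch, assuming the sleep_stack_data layout sketched earlier and field names guessed from the SLEEP_SAVE_SP_* offsets:

	#include <linux/types.h>

	struct sleep_save_sp {
		phys_addr_t	*save_ptr_stash;	/* SLEEP_SAVE_SP_VIRT */
		phys_addr_t	save_ptr_stash_phys;	/* SLEEP_SAVE_SP_PHYS */
	};
	extern struct sleep_save_sp sleep_save_sp;

	/* Runs with the MMU off, so the physical pointer is usable directly;
	 * hash_index models the value compute_mpidr_hash left in x7. */
	static struct sleep_stack_data *resume_ctx_sketch(u64 hash_index)
	{
		phys_addr_t *stash = (phys_addr_t *)sleep_save_sp.save_ptr_stash_phys;

		/* ldr x0, [x0, x7, lsl #3]: each stash entry is an 8-byte pointer.
		 * x29 <- &data->callee_saved_regs, x0 <- &data->system_regs. */
		return (struct sleep_stack_data *)stash[hash_index];
	}

In the final hunk below, the callee-saved registers are reloaded through that x29 base while the MMU is still off, with x29 itself reloaded last because it is the base register for the other loads.
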
@@ -178,5 +144,12 @@ ENTRY(cpu_resume)
 	 * pointer and x1 to contain physical address of 1:1 page tables
 	 */
 	bl	cpu_do_resume		// PC relative jump, MMU off
+	/* Can't access these by physical address once the MMU is on */
+	ldp	x19, x20, [x29, #16]
+	ldp	x21, x22, [x29, #32]
+	ldp	x23, x24, [x29, #48]
+	ldp	x25, x26, [x29, #64]
+	ldp	x27, x28, [x29, #80]
+	ldp	x29, lr, [x29]
 	b	cpu_resume_mmu		// Resume MMU, never returns
 ENDPROC(cpu_resume)