3434#include <asm/pgtable-hwdef.h>
3535#include <asm/sections.h>
3636#include <asm/suspend.h>
37+ #include <asm/sysreg.h>
3738#include <asm/virt.h>
3839
3940/*
@@ -216,12 +217,22 @@ static int create_safe_exec_page(void *src_start, size_t length,
216217 set_pte (pte , __pte (virt_to_phys ((void * )dst ) |
217218 pgprot_val (PAGE_KERNEL_EXEC )));
218219
219- /* Load our new page tables */
220- asm volatile ("msr ttbr0_el1, %0;"
221- "isb;"
222- "tlbi vmalle1is;"
223- "dsb ish;"
224- "isb" : : "r" (virt_to_phys (pgd )));
220+ /*
221+ * Load our new page tables. A strict BBM approach requires that we
222+ * ensure that TLBs are free of any entries that may overlap with the
223+ * global mappings we are about to install.
224+ *
225+ * For a real hibernate/resume cycle TTBR0 currently points to a zero
226+ * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
227+ * runtime services), while for a userspace-driven test_resume cycle it
228+ * points to userspace page tables (and we must point it at a zero page
229+ * ourselves). Elsewhere we only (un)install the idmap with preemption
230+ * disabled, so T0SZ should be as required regardless.
231+ */
232+ cpu_set_reserved_ttbr0 ();
233+ local_flush_tlb_all ();
234+ write_sysreg (virt_to_phys (pgd ), ttbr0_el1 );
235+ isb ();
225236
226237 * phys_dst_addr = virt_to_phys ((void * )dst );
227238
@@ -387,6 +398,38 @@ int swsusp_arch_resume(void)
387398 void __noreturn (* hibernate_exit )(phys_addr_t , phys_addr_t , void * ,
388399 void * , phys_addr_t , phys_addr_t );
389400
401+ /*
402+ * Restoring the memory image will overwrite the ttbr1 page tables.
403+ * Create a second copy of just the linear map, and use this when
404+ * restoring.
405+ */
406+ tmp_pg_dir = (pgd_t * )get_safe_page (GFP_ATOMIC );
407+ if (!tmp_pg_dir ) {
408+ pr_err ("Failed to allocate memory for temporary page tables." );
409+ rc = - ENOMEM ;
410+ goto out ;
411+ }
412+ rc = copy_page_tables (tmp_pg_dir , PAGE_OFFSET , 0 );
413+ if (rc )
414+ goto out ;
415+
416+ /*
417+ * Since we only copied the linear map, we need to find restore_pblist's
418+ * linear map address.
419+ */
420+ lm_restore_pblist = LMADDR (restore_pblist );
421+
422+ /*
423+ * We need a zero page that is zero before & after resume in order
424+ * to break before make on the ttbr1 page tables.
425+ */
426+ zero_page = (void * )get_safe_page (GFP_ATOMIC );
427+ if (!zero_page ) {
428+ pr_err ("Failed to allocate zero page." );
429+ rc = - ENOMEM ;
430+ goto out ;
431+ }
432+
390433 /*
391434 * Locate the exit code in the bottom-but-one page, so that *NULL
392435 * still has disastrous effects.
@@ -412,27 +455,6 @@ int swsusp_arch_resume(void)
412455 */
413456 __flush_dcache_area (hibernate_exit , exit_size );
414457
415- /*
416- * Restoring the memory image will overwrite the ttbr1 page tables.
417- * Create a second copy of just the linear map, and use this when
418- * restoring.
419- */
420- tmp_pg_dir = (pgd_t * )get_safe_page (GFP_ATOMIC );
421- if (!tmp_pg_dir ) {
422- pr_err ("Failed to allocate memory for temporary page tables." );
423- rc = - ENOMEM ;
424- goto out ;
425- }
426- rc = copy_page_tables (tmp_pg_dir , PAGE_OFFSET , 0 );
427- if (rc )
428- goto out ;
429-
430- /*
431- * Since we only copied the linear map, we need to find restore_pblist's
432- * linear map address.
433- */
434- lm_restore_pblist = LMADDR (restore_pblist );
435-
436458 /*
437459 * KASLR will cause the el2 vectors to be in a different location in
438460 * the resumed kernel. Load hibernate's temporary copy into el2.
@@ -447,12 +469,6 @@ int swsusp_arch_resume(void)
447469 __hyp_set_vectors (el2_vectors );
448470 }
449471
450- /*
451- * We need a zero page that is zero before & after resume in order to
452- * to break before make on the ttbr1 page tables.
453- */
454- zero_page = (void * )get_safe_page (GFP_ATOMIC );
455-
456472 hibernate_exit (virt_to_phys (tmp_pg_dir ), resume_hdr .ttbr1_el1 ,
457473 resume_hdr .reenter_kernel , lm_restore_pblist ,
458474 resume_hdr .__hyp_stub_vectors , virt_to_phys (zero_page ));
0 commit comments