Skip to content

Commit 4d4fe8f

Browse files
James Morse authored and Alex Shi committed
PM / Hibernate: Call flush_icache_range() on pages restored in-place
Some architectures require code written to memory as if it were data to be 'cleaned' from any data caches before the processor can fetch them as new instructions.

During resume from hibernate, the snapshot code copies some pages directly, meaning these architectures do not get a chance to perform their cache maintenance. Modify the read and decompress code to call flush_icache_range() on all pages that are restored, so that the restored in-place pages are guaranteed to be executable on these architectures.

Signed-off-by: James Morse <james.morse@arm.com>
Acked-by: Pavel Machek <pavel@ucw.cz>
Acked-by: Rafael J. Wysocki <rjw@rjwysocki.net>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
[will: make clean_pages_on_* static and remove initialisers]
Signed-off-by: Will Deacon <will.deacon@arm.com>
(cherry picked from commit f6cf0545ec697ddc278b7457b7d0c0d86a2ea88e)
Signed-off-by: Alex Shi <alex.shi@linaro.org>
1 parent 5249f3c commit 4d4fe8f

1 file changed

Lines changed: 18 additions & 0 deletions

File tree

kernel/power/swap.c

Lines changed: 18 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -36,6 +36,14 @@
3636

3737
#define HIBERNATE_SIG "S1SUSPEND"
3838

39+
/*
40+
* When reading an {un,}compressed image, we may restore pages in place,
41+
* in which case some architectures need these pages cleaning before they
42+
* can be executed. We don't know which pages these may be, so clean the lot.
43+
*/
44+
static bool clean_pages_on_read;
45+
static bool clean_pages_on_decompress;
46+
3947
/*
4048
* The swap map is a data structure used for keeping track of each page
4149
* written to a swap partition. It consists of many swap_map_page
@@ -241,6 +249,9 @@ static void hib_end_io(struct bio *bio)
241249

242250
if (bio_data_dir(bio) == WRITE)
243251
put_page(page);
252+
else if (clean_pages_on_read)
253+
flush_icache_range((unsigned long)page_address(page),
254+
(unsigned long)page_address(page) + PAGE_SIZE);
244255

245256
if (bio->bi_error && !hb->error)
246257
hb->error = bio->bi_error;
@@ -1049,6 +1060,7 @@ static int load_image(struct swap_map_handle *handle,
10491060

10501061
hib_init_batch(&hb);
10511062

1063+
clean_pages_on_read = true;
10521064
printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
10531065
nr_to_read);
10541066
m = nr_to_read / 10;
@@ -1124,6 +1136,10 @@ static int lzo_decompress_threadfn(void *data)
11241136
d->unc_len = LZO_UNC_SIZE;
11251137
d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
11261138
d->unc, &d->unc_len);
1139+
if (clean_pages_on_decompress)
1140+
flush_icache_range((unsigned long)d->unc,
1141+
(unsigned long)d->unc + d->unc_len);
1142+
11271143
atomic_set(&d->stop, 1);
11281144
wake_up(&d->done);
11291145
}
@@ -1189,6 +1205,8 @@ static int load_image_lzo(struct swap_map_handle *handle,
11891205
}
11901206
memset(crc, 0, offsetof(struct crc_data, go));
11911207

1208+
clean_pages_on_decompress = true;
1209+
11921210
/*
11931211
* Start the decompression threads.
11941212
*/

0 commit comments

Comments (0)