
Commit 89a3ecc

prati0100 authored and akpm00 committed
kho: make sure page being restored is actually from KHO
When restoring a page, no sanity checks are done to make sure the page actually came from a kexec handover. The caller is trusted to pass in the right address. If the caller has a bug and passes in a wrong address, an in-use page might be "restored" and returned, causing all sorts of memory corruption.

Harden the page restore logic by stashing a magic number in page->private along with the order. If the magic number does not match, the page won't be touched. page->private is an unsigned long. The union kho_page_info splits it into two parts, with one holding the order and the other holding the magic number.

Link: https://lkml.kernel.org/r/20250917125725.665-2-pratyush@kernel.org
Signed-off-by: Pratyush Yadav <pratyush@kernel.org>
Cc: Alexander Graf <graf@amazon.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Changyuan Lyu <changyuanl@google.com>
Cc: Chris Li <chrisl@kernel.org>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
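As an illustration of the packing described above, here is a minimal userspace sketch, assuming a 64-bit unsigned long (the union and constant mirror the patch but carry a _sketch suffix; this is not the kernel code, which appears in the diff below):

/* Userspace sketch: pack an order and a magic value into one unsigned long,
 * the same way union kho_page_info overlays page->private in the patch. */
#include <assert.h>
#include <stdio.h>

#define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */

union kho_page_info_sketch {
	unsigned long page_private;	/* stand-in for page->private */
	struct {
		unsigned int order;
		unsigned int magic;
	};
};

int main(void)
{
	union kho_page_info_sketch mark, check;

	/* Only holds when unsigned long is 64 bits wide. */
	static_assert(sizeof(union kho_page_info_sketch) == sizeof(unsigned long),
		      "order and magic must fit in one unsigned long");

	/* What deserialize_bitmap() would stash on the head page... */
	mark.order = 3;
	mark.magic = KHO_PAGE_MAGIC;

	/* ...and what kho_restore_page() would read back and verify. */
	check.page_private = mark.page_private;
	assert(check.magic == KHO_PAGE_MAGIC && check.order == 3);

	printf("page->private would hold 0x%lx\n", mark.page_private);
	return 0;
}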
1 parent 20571b1 commit 89a3ecc

1 file changed

Lines changed: 34 additions & 7 deletions

File tree

kernel/kexec_handover.c

@@ -32,6 +32,22 @@
 #define PROP_PRESERVED_MEMORY_MAP "preserved-memory-map"
 #define PROP_SUB_FDT "fdt"
 
+#define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */
+
+/*
+ * KHO uses page->private, which is an unsigned long, to store page metadata.
+ * Use it to store both the magic and the order.
+ */
+union kho_page_info {
+	unsigned long page_private;
+	struct {
+		unsigned int order;
+		unsigned int magic;
+	};
+};
+
+static_assert(sizeof(union kho_page_info) == sizeof(((struct page *)0)->private));
+
 static bool kho_enable __ro_after_init;
 
 bool kho_is_enabled(void)
@@ -186,25 +202,33 @@ static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
 static struct page *kho_restore_page(phys_addr_t phys)
 {
 	struct page *page = pfn_to_online_page(PHYS_PFN(phys));
-	unsigned int nr_pages, order;
+	union kho_page_info info;
+	unsigned int nr_pages;
 
 	if (!page)
 		return NULL;
 
-	order = page->private;
-	if (order > MAX_PAGE_ORDER)
+	info.page_private = page->private;
+	/*
+	 * deserialize_bitmap() only sets the magic on the head page. This magic
+	 * check also implicitly makes sure phys is order-aligned since for
+	 * non-order-aligned phys addresses, magic will never be set.
+	 */
+	if (WARN_ON_ONCE(info.magic != KHO_PAGE_MAGIC || info.order > MAX_PAGE_ORDER))
 		return NULL;
-	nr_pages = (1 << order);
+	nr_pages = (1 << info.order);
 
+	/* Clear private to make sure later restores on this page error out. */
+	page->private = 0;
 	/* Head page gets refcount of 1. */
 	set_page_count(page, 1);
 
 	/* For higher order folios, tail pages get a page count of zero. */
 	for (unsigned int i = 1; i < nr_pages; i++)
 		set_page_count(page + i, 0);
 
-	if (order > 0)
-		prep_compound_page(page, order);
+	if (info.order > 0)
+		prep_compound_page(page, info.order);
 
 	adjust_managed_page_count(page, nr_pages);
 	return page;
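To illustrate the failure modes the new check guards against, here is a small userspace simulation, again assuming a 64-bit unsigned long (struct fake_page and restore_check() are hypothetical stand-ins for this sketch, not kernel APIs): an unmarked in-use page, or a page restored a second time, is rejected rather than handed back.

/* Userspace sketch of the magic check added to kho_restore_page(). */
#include <stdio.h>

#define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */

union kho_page_info_sketch {
	unsigned long page_private;
	struct {
		unsigned int order;
		unsigned int magic;
	};
};

struct fake_page {			/* hypothetical stand-in for struct page */
	unsigned long private;
};

/* Mirrors the new logic: reject any page missing the KHO magic, and clear
 * the mark on success so a second restore of the same page fails too. */
static int restore_check(struct fake_page *page, unsigned int *order)
{
	union kho_page_info_sketch info;

	info.page_private = page->private;
	if (info.magic != KHO_PAGE_MAGIC)
		return -1;
	*order = info.order;
	page->private = 0;
	return 0;
}

int main(void)
{
	struct fake_page kho_page = { 0 }, in_use_page = { 0 };
	union kho_page_info_sketch mark;
	unsigned int order;

	/* The deserialize_bitmap() side: mark only the page handed over by KHO. */
	mark.order = 2;
	mark.magic = KHO_PAGE_MAGIC;
	kho_page.private = mark.page_private;

	printf("KHO page:        %d\n", restore_check(&kho_page, &order));	/* 0  */
	printf("restored twice:  %d\n", restore_check(&kho_page, &order));	/* -1 */
	printf("unrelated page:  %d\n", restore_check(&in_use_page, &order));	/* -1 */
	return 0;
}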
@@ -341,10 +365,13 @@ static void __init deserialize_bitmap(unsigned int order,
 		phys_addr_t phys =
 			elm->phys_start + (bit << (order + PAGE_SHIFT));
 		struct page *page = phys_to_page(phys);
+		union kho_page_info info;
 
 		memblock_reserve(phys, sz);
 		memblock_reserved_mark_noinit(phys, sz);
-		page->private = order;
+		info.magic = KHO_PAGE_MAGIC;
+		info.order = order;
+		page->private = info.page_private;
 	}
 }
 
