Skip to content

Commit 88e4909

Browse files
committed
Merge tag 'ftrace-v6.19-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull ftrace fix from Steven Rostedt:

 - Fix allocation accounting on boot up

   The ftrace records for each function that ftrace can attach to are
   stored in a group of pages. At boot up, the number of pages is
   calculated and allocated. After that, the pages are filled with data.
   It may allocate more than needed due to some functions not being
   recorded (because they are unused weak functions); this too is
   recorded. After the data is filled in, a check is made to make sure
   the right number of pages was allocated. But this was off due to the
   assumption that the same number of entries fits in every page.
   Because the size of an entry does not evenly divide into PAGE_SIZE,
   there is a rounding error when a large number of pages is allocated
   to hold the entries. This causes the check to fail and triggers a
   warning.

   Fix the accounting by finding out how many pages are actually
   allocated from the functions that allocate them, and use that to see
   whether all the allocated pages were used and whether the unused ones
   are properly freed.

* tag 'ftrace-v6.19-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  ftrace: Do not over-allocate ftrace memory
2 parents 603c05a + be55257 commit 88e4909

1 file changed

Lines changed: 15 additions & 14 deletions

File tree

kernel/trace/ftrace.c

Lines changed: 15 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1148,7 +1148,6 @@ struct ftrace_page {
11481148
};
11491149

11501150
#define ENTRY_SIZE sizeof(struct dyn_ftrace)
1151-
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
11521151

11531152
static struct ftrace_page *ftrace_pages_start;
11541153
static struct ftrace_page *ftrace_pages;
@@ -3834,7 +3833,8 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
38343833
return 0;
38353834
}
38363835

3837-
static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3836+
static int ftrace_allocate_records(struct ftrace_page *pg, int count,
3837+
unsigned long *num_pages)
38383838
{
38393839
int order;
38403840
int pages;
@@ -3844,7 +3844,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
38443844
return -EINVAL;
38453845

38463846
/* We want to fill as much as possible, with no empty pages */
3847-
pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
3847+
pages = DIV_ROUND_UP(count * ENTRY_SIZE, PAGE_SIZE);
38483848
order = fls(pages) - 1;
38493849

38503850
again:
@@ -3859,6 +3859,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
38593859
}
38603860

38613861
ftrace_number_of_pages += 1 << order;
3862+
*num_pages += 1 << order;
38623863
ftrace_number_of_groups++;
38633864

38643865
cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
@@ -3887,12 +3888,14 @@ static void ftrace_free_pages(struct ftrace_page *pages)
38873888
}
38883889

38893890
static struct ftrace_page *
3890-
ftrace_allocate_pages(unsigned long num_to_init)
3891+
ftrace_allocate_pages(unsigned long num_to_init, unsigned long *num_pages)
38913892
{
38923893
struct ftrace_page *start_pg;
38933894
struct ftrace_page *pg;
38943895
int cnt;
38953896

3897+
*num_pages = 0;
3898+
38963899
if (!num_to_init)
38973900
return NULL;
38983901

@@ -3906,7 +3909,7 @@ ftrace_allocate_pages(unsigned long num_to_init)
39063909
* waste as little space as possible.
39073910
*/
39083911
for (;;) {
3909-
cnt = ftrace_allocate_records(pg, num_to_init);
3912+
cnt = ftrace_allocate_records(pg, num_to_init, num_pages);
39103913
if (cnt < 0)
39113914
goto free_pages;
39123915

@@ -7192,8 +7195,6 @@ static int ftrace_process_locs(struct module *mod,
71927195
if (!count)
71937196
return 0;
71947197

7195-
pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
7196-
71977198
/*
71987199
* Sorting mcount in vmlinux at build time depend on
71997200
* CONFIG_BUILDTIME_MCOUNT_SORT, while mcount loc in
@@ -7206,7 +7207,7 @@ static int ftrace_process_locs(struct module *mod,
72067207
test_is_sorted(start, count);
72077208
}
72087209

7209-
start_pg = ftrace_allocate_pages(count);
7210+
start_pg = ftrace_allocate_pages(count, &pages);
72107211
if (!start_pg)
72117212
return -ENOMEM;
72127213

@@ -7305,27 +7306,27 @@ static int ftrace_process_locs(struct module *mod,
73057306
/* We should have used all pages unless we skipped some */
73067307
if (pg_unuse) {
73077308
unsigned long pg_remaining, remaining = 0;
7308-
unsigned long skip;
7309+
long skip;
73097310

73107311
/* Count the number of entries unused and compare it to skipped. */
7311-
pg_remaining = (ENTRIES_PER_PAGE << pg->order) - pg->index;
7312+
pg_remaining = (PAGE_SIZE << pg->order) / ENTRY_SIZE - pg->index;
73127313

73137314
if (!WARN(skipped < pg_remaining, "Extra allocated pages for ftrace")) {
73147315

73157316
skip = skipped - pg_remaining;
73167317

7317-
for (pg = pg_unuse; pg; pg = pg->next)
7318+
for (pg = pg_unuse; pg && skip > 0; pg = pg->next) {
73187319
remaining += 1 << pg->order;
7320+
skip -= (PAGE_SIZE << pg->order) / ENTRY_SIZE;
7321+
}
73197322

73207323
pages -= remaining;
73217324

7322-
skip = DIV_ROUND_UP(skip, ENTRIES_PER_PAGE);
7323-
73247325
/*
73257326
* Check to see if the number of pages remaining would
73267327
* just fit the number of entries skipped.
73277328
*/
7328-
WARN(skip != remaining, "Extra allocated pages for ftrace: %lu with %lu skipped",
7329+
WARN(pg || skip > 0, "Extra allocated pages for ftrace: %lu with %lu skipped",
73297330
remaining, skipped);
73307331
}
73317332
/* Need to synchronize with ftrace_location_range() */

0 commit comments

Comments (0)