| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-03-27 15:44:34 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-03-27 15:44:34 -0700 |
| commit | dd161f74f8198c62f9bcf893f72c64bbb0d68b25 (patch) | |
| tree | 252fc0ab9d7322cdece0e5990a6513182c98f37b /kernel/trace/ftrace.c | |
| parent | 5c2a430e85994f4873ea5ec42091baa1153bc731 (diff) | |
| parent | dc208c69c033d3caba0509da1ae065d2b5ff165f (diff) | |
Merge tag 'trace-sorttable-v6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull tracing / sorttable updates from Steven Rostedt:
- Implement arm64 build time sorting of the mcount location table
When gcc is used to build arm64, the mcount_loc section is all zeros
in the vmlinux ELF file; the addresses are instead stored in the
Elf_Rela entries.
To sort at build time, an array is allocated and the addresses are
added to it from the content of the mcount_loc section as well as the
Elf_Rela data. After sorting, the information is written back into the
Elf_Rela entries, which then hold the sorted section (see the first
sketch after this list).
- Make sorting of the mcount location table for arm64 work with clang
as well
When clang is used, the mcount_loc section contains the addresses,
unlike the gcc build. An array is still created, and the sorting works
for both methods.
- Remove weak functions from the mcount_loc section
Have the sorttable code take the output of 'nm -S', which lists the
functions along with their sizes. Using this information, the
sorttable code can determine if a function in the mcount_loc section
was weak and overridden. If the function is not found, its entry is
zeroed (see the second sketch after this list). On boot, when the
mcount_loc section is read and the ftrace table is created, if an
address in mcount_loc is not in the kernel core text then it is
removed and not added to the ftrace_filter_functions (the functions
that ftrace callbacks can be attached to).
- Update and fix the reporting of how much data is used for ftrace
functions
On boot, a report is printed of how many pages were used by the ftrace
table, as well as how they were grouped (the table holds a list of
sections that are groups of pages that were able to be allocated).
The removal of the weak functions required this accounting to be
updated (see the sketch after the diff at the end).
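The build-time sort itself is conceptually simple once the addresses
are in an array. Below is a minimal userspace sketch of the idea, not
the actual scripts/sorttable code; the addresses are illustrative.
Gather the values, qsort() them, and write them back to wherever they
came from (the section data for clang builds, the Elf_Rela addends for
gcc arm64 builds):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Compare two 64-bit addresses for qsort() */
static int addr_cmp(const void *a, const void *b)
{
	uint64_t x = *(const uint64_t *)a;
	uint64_t y = *(const uint64_t *)b;

	return x < y ? -1 : x > y;
}

int main(void)
{
	/* Stand-ins for addresses read from mcount_loc or Elf_Rela addends */
	uint64_t addrs[] = { 0xffff8000803a10, 0xffff8000801000, 0xffff8000802b40 };
	size_t count = sizeof(addrs) / sizeof(addrs[0]);

	qsort(addrs, count, sizeof(addrs[0]), addr_cmp);

	/* In sorttable proper, the sorted array would be copied back
	 * into the Elf_Rela entries (or the section data) here. */
	for (size_t i = 0; i < count; i++)
		printf("%#lx\n", (unsigned long)addrs[i]);

	return 0;
}
```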
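For the weak-function removal, the key operation is checking whether
each mcount_loc address falls inside a function reported by 'nm -S'.
Here is a hedged sketch of that check; the struct, symbol table, and
helper below are made up for illustration and are not sorttable's real
data structures:

```c
#include <stdint.h>
#include <stdio.h>

struct sym {
	uint64_t addr;
	uint64_t size;
};

/* Would be built by parsing `nm -S vmlinux` in the real tool */
static const struct sym syms[] = {
	{ 0x1000, 0x40 },
	{ 0x1040, 0x80 },
	{ 0x2000, 0x20 },
};

/* Does addr land inside any function that nm reported? */
static int in_known_function(uint64_t addr)
{
	for (size_t i = 0; i < sizeof(syms) / sizeof(syms[0]); i++) {
		if (addr >= syms[i].addr && addr < syms[i].addr + syms[i].size)
			return 1;
	}
	return 0;
}

int main(void)
{
	/* 0x1800 stands in for an mcount call site of an overridden weak function */
	uint64_t mcount_loc[] = { 0x1004, 0x1800, 0x2004 };

	for (size_t i = 0; i < sizeof(mcount_loc) / sizeof(mcount_loc[0]); i++) {
		if (!in_known_function(mcount_loc[i]))
			mcount_loc[i] = 0;	/* zeroed, as sorttable now does */
		printf("entry %zu: %#lx\n", i, (unsigned long)mcount_loc[i]);
	}
	return 0;
}
```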
* tag 'trace-sorttable-v6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
scripts/sorttable: Allow matches to functions before function entry
scripts/sorttable: Use normal sort if there's no relocs in the mcount section
ftrace: Check against is_kernel_text() instead of kaslr_offset()
ftrace: Test mcount_loc addr before calling ftrace_call_addr()
ftrace: Have ftrace pages output reflect freed pages
ftrace: Update the mcount_loc check of skipped entries
scripts/sorttable: Zero out weak functions in mcount_loc table
scripts/sorttable: Always use an array for the mcount_loc sorting
scripts/sorttable: Have mcount rela sort use direct values
arm64: scripts/sorttable: Implement sorting mcount_loc at boot for arm64
Diffstat (limited to 'kernel/trace/ftrace.c')
| -rw-r--r-- | kernel/trace/ftrace.c | 55 |
1 file changed, 50 insertions, 5 deletions
```diff
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index fc88e0688daf..97c1be9f727a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -7016,6 +7016,7 @@ static int ftrace_process_locs(struct module *mod,
 	unsigned long *p;
 	unsigned long addr;
 	unsigned long flags = 0; /* Shut up gcc */
+	unsigned long pages;
 	int ret = -ENOMEM;
 
 	count = end - start;
@@ -7023,6 +7024,8 @@ static int ftrace_process_locs(struct module *mod,
 	if (!count)
 		return 0;
 
+	pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
+
 	/*
 	 * Sorting mcount in vmlinux at build time depend on
 	 * CONFIG_BUILDTIME_MCOUNT_SORT, while mcount loc in
@@ -7067,7 +7070,9 @@ static int ftrace_process_locs(struct module *mod,
 	pg = start_pg;
 	while (p < end) {
 		unsigned long end_offset;
-		addr = ftrace_call_adjust(*p++);
+
+		addr = *p++;
+
 		/*
 		 * Some architecture linkers will pad between
 		 * the different mcount_loc sections of different
@@ -7079,6 +7084,19 @@ static int ftrace_process_locs(struct module *mod,
 			continue;
 		}
 
+		/*
+		 * If this is core kernel, make sure the address is in core
+		 * or inittext, as weak functions get zeroed and KASLR can
+		 * move them to something other than zero. It just will not
+		 * move it to an area where kernel text is.
+		 */
+		if (!mod && !(is_kernel_text(addr) || is_kernel_inittext(addr))) {
+			skipped++;
+			continue;
+		}
+
+		addr = ftrace_call_adjust(addr);
+
 		end_offset = (pg->index+1) * sizeof(pg->records[0]);
 		if (end_offset > PAGE_SIZE << pg->order) {
 			/* We should have allocated enough */
@@ -7118,11 +7136,41 @@ static int ftrace_process_locs(struct module *mod,
 
 	/* We should have used all pages unless we skipped some */
 	if (pg_unuse) {
-		WARN_ON(!skipped);
+		unsigned long pg_remaining, remaining = 0;
+		unsigned long skip;
+
+		/* Count the number of entries unused and compare it to skipped. */
+		pg_remaining = (ENTRIES_PER_PAGE << pg->order) - pg->index;
+
+		if (!WARN(skipped < pg_remaining, "Extra allocated pages for ftrace")) {
+
+			skip = skipped - pg_remaining;
+
+			for (pg = pg_unuse; pg; pg = pg->next)
+				remaining += 1 << pg->order;
+
+			pages -= remaining;
+
+			skip = DIV_ROUND_UP(skip, ENTRIES_PER_PAGE);
+
+			/*
+			 * Check to see if the number of pages remaining would
+			 * just fit the number of entries skipped.
+			 */
+			WARN(skip != remaining, "Extra allocated pages for ftrace: %lu with %lu skipped",
+			     remaining, skipped);
+		}
+
 		/* Need to synchronize with ftrace_location_range() */
 		synchronize_rcu();
 		ftrace_free_pages(pg_unuse);
 	}
+
+	if (!mod) {
+		count -= skipped;
+		pr_info("ftrace: allocating %ld entries in %ld pages\n",
+			count, pages);
+	}
+
 	return ret;
 }
 
@@ -7768,9 +7816,6 @@ void __init ftrace_init(void)
 		goto failed;
 	}
 
-	pr_info("ftrace: allocating %ld entries in %ld pages\n",
-		count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
-
 	ret = ftrace_process_locs(NULL, __start_mcount_loc,
 				  __stop_mcount_loc);
```
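To see how the new accounting in ftrace_process_locs() above adds up,
here is a standalone sketch of the same arithmetic with made-up
numbers; ENTRIES_PER_PAGE is a stand-in for the kernel's
PAGE_SIZE / sizeof(struct dyn_ftrace), and the values are purely
illustrative:

```c
#include <stdio.h>

#define ENTRIES_PER_PAGE   256			/* hypothetical value */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long skipped = 700;	/* zeroed weak-function entries */
	unsigned long pg_index = 68;	/* entries used in the last page */
	unsigned long pg_order = 0;	/* last page is a single page */
	unsigned long remaining = 2;	/* whole pages left unused */

	/* Free slots in the last partially-filled page */
	unsigned long pg_remaining = (ENTRIES_PER_PAGE << pg_order) - pg_index;

	/* Skipped entries that must be explained by whole unused pages:
	 * (700 - 188) / 256 rounded up = 2, which matches remaining. */
	unsigned long skip = DIV_ROUND_UP(skipped - pg_remaining, ENTRIES_PER_PAGE);

	printf("%s: skip=%lu remaining=%lu\n",
	       skip == remaining ? "accounting OK" : "extra pages",
	       skip, remaining);
	return 0;
}
```

When the two counts disagree, the kernel code fires the "Extra
allocated pages for ftrace" warning instead of silently freeing the
surplus.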