path: root/tools/objtool/trace.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2025-12-01 20:18:59 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2025-12-01 20:18:59 -0800
commit	63e6995005be8ceb8a1d56a18df1a1a40c28356d (patch)
tree	9dc6af0e1ab47710fe21cf7cfd95b1d28b6bec27 /tools/objtool/trace.c
parent	b53440f8e5a1466870d7a1d255e0f9966e0041fb (diff)
parent	6ec33db1aaf06a76fb063610e668f8e12f32ebbf (diff)
Merge tag 'objtool-core-2025-12-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull objtool updates from Ingo Molnar:

 - klp-build livepatch module generation (Josh Poimboeuf)

   Introduce new objtool features and a klp-build script to generate
   livepatch modules using a source .patch as input.

   This builds on concepts from the longstanding out-of-tree kpatch
   project which began in 2012 and has been used for many years to
   generate livepatch modules for production kernels. However, this is
   a complete rewrite which incorporates hard-earned lessons from 12+
   years of maintaining kpatch.

   Key improvements compared to kpatch-build:

    - Integrated with objtool: Leverages objtool's existing
      control-flow graph analysis to help detect changed functions.

    - Works on vmlinux.o: Supports late-linked objects, making it
      compatible with LTO, IBT, and similar.

    - Simplified code base: ~3k fewer lines of code.

    - Upstream: No more out-of-tree #ifdef hacks, far less cruft.

    - Cleaner internals: Vastly simplified logic for
      symbol/section/reloc inclusion and special section extraction.

    - Robust __LINE__ macro handling: Avoids false positive binary
      diffs caused by the __LINE__ macro by introducing a
      fix-patch-lines script which injects #line directives into the
      source .patch to preserve the original line numbers at compile
      time.

 - Disassemble code with libopcodes instead of running objdump
   (Alexandre Chartre)

 - Disassemble support (-d option to objtool) by Alexandre Chartre,
   which supports the decoding of various Linux kernel code generation
   specials such as alternatives:

     17ef: sched_balance_find_dst_group+0x62f    mov 0x34(%r9),%edx
     17f3: sched_balance_find_dst_group+0x633  | <alternative.17f3>           | X86_FEATURE_POPCNT
     17f3: sched_balance_find_dst_group+0x633  | call 0x17f8 <__sw_hweight64> | popcnt %rdi,%rax
     17f8: sched_balance_find_dst_group+0x638    cmp %eax,%edx
     ...

   jump table alternatives:

     1895: sched_use_asym_prio+0x5     test $0x8,%ch
     1898: sched_use_asym_prio+0x8     je 0x18a9 <sched_use_asym_prio+0x19>
     189a: sched_use_asym_prio+0xa   | <jump_table.189a>                     | JUMP
     189a: sched_use_asym_prio+0xa   | jmp 0x18ae <sched_use_asym_prio+0x1e> | nop2
     189c: sched_use_asym_prio+0xc     mov $0x1,%eax
     18a1: sched_use_asym_prio+0x11    and $0x80,%ecx
     ...

   exception table alternatives:

     native_read_msr:
     5b80: native_read_msr+0x0    mov %edi,%ecx
     5b82: native_read_msr+0x2  | <ex_table.5b82> | EXCEPTION
     5b82: native_read_msr+0x2  | rdmsr           | resume at 0x5b84 <native_read_msr+0x4>
     5b84: native_read_msr+0x4    shl $0x20,%rdx
     ...

   x86 feature flag decoding (also see the X86_FEATURE_POPCNT example
   in sched_balance_find_dst_group() above):

     2faaf: start_thread_common.constprop.0+0x1f    jne 0x2fba4 <start_thread_common.constprop.0+0x114>
     2fab5: start_thread_common.constprop.0+0x25  | <alternative.2fab5>               | X86_FEATURE_ALWAYS                                | X86_BUG_NULL_SEG
     2fab5: start_thread_common.constprop.0+0x25  | jmp 0x2faba <.altinstr_aux+0x2f4> | jmp 0x4b0 <start_thread_common.constprop.0+0x3f>  | nop5
     2faba: start_thread_common.constprop.0+0x2a    mov $0x2b,%eax
     ...

   NOP sequence shortening:

     1048e2: snapshot_write_finalize+0xc2    je 0x104917 <snapshot_write_finalize+0xf7>
     1048e4: snapshot_write_finalize+0xc4    nop6
     1048ea: snapshot_write_finalize+0xca    nop11
     1048f5: snapshot_write_finalize+0xd5    nop11
     104900: snapshot_write_finalize+0xe0    mov %rax,%rcx
     104903: snapshot_write_finalize+0xe3    mov 0x10(%rdx),%rax
     ...

   and much more.

 - Function validation tracing support (Alexandre Chartre)

 - Various -ffunction-sections fixes (Josh Poimboeuf)

 - Clang AutoFDO (Automated Feedback-Directed Optimizations) support
   (Josh Poimboeuf)

 - Misc fixes and cleanups (Borislav Petkov, Chen Ni, Dylan Hatch,
   Ingo Molnar, John Wang, Josh Poimboeuf, Pankaj Raghav,
   Peter Zijlstra, Thorsten Blum)

* tag 'objtool-core-2025-12-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (129 commits)
  objtool: Fix segfault on unknown alternatives
  objtool: Build with disassembly can fail when including bdf.h
  objtool: Trim trailing NOPs in alternative
  objtool: Add wide output for disassembly
  objtool: Compact output for alternatives with one instruction
  objtool: Improve naming of group alternatives
  objtool: Add Function to get the name of a CPU feature
  objtool: Provide access to feature and flags of group alternatives
  objtool: Fix address references in alternatives
  objtool: Disassemble jump table alternatives
  objtool: Disassemble exception table alternatives
  objtool: Print addresses with alternative instructions
  objtool: Disassemble group alternatives
  objtool: Print headers for alternatives
  objtool: Preserve alternatives order
  objtool: Add the --disas=<function-pattern> action
  objtool: Do not validate IBT for .return_sites and .call_sites
  objtool: Improve tracing of alternative instructions
  objtool: Add functions to better name alternatives
  objtool: Identify the different types of alternatives
  ...
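
To illustrate the __LINE__ handling described above, here is a minimal,
standalone sketch (the file name and line number below are made up, not
taken from the series) of what an injected #line directive does: the
compiler resumes the stated, pre-patch numbering, so later __LINE__
expansions do not change and do not cause spurious binary diffs.

    #include <stdio.h>

    int main(void)
    {
            printf("line added by the .patch: %d\n", __LINE__);
    #line 1234 "drivers/foo/bar.c"  /* hypothetical: restore pre-patch numbering */
            printf("pre-existing code: %s:%d\n", __FILE__, __LINE__);  /* reports drivers/foo/bar.c:1234 */
            return 0;
    }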
Diffstat (limited to 'tools/objtool/trace.c')
-rw-r--r--	tools/objtool/trace.c	203
1 files changed, 203 insertions, 0 deletions
diff --git a/tools/objtool/trace.c b/tools/objtool/trace.c
new file mode 100644
index 000000000000..5dec44dab781
--- /dev/null
+++ b/tools/objtool/trace.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2025, Oracle and/or its affiliates.
+ */
+
+#include <objtool/trace.h>
+
+bool trace;
+int trace_depth;
+
+/*
+ * Macros to trace changes to CFI state attributes.
+ */
+
+#define TRACE_CFI_ATTR(attr, prev, next, fmt, ...) \
+({ \
+	if ((prev)->attr != (next)->attr) \
+		TRACE("%s=" fmt " ", #attr, __VA_ARGS__); \
+})
+
+#define TRACE_CFI_ATTR_BOOL(attr, prev, next) \
+	TRACE_CFI_ATTR(attr, prev, next, \
+		       "%s", (next)->attr ? "true" : "false")
+
+#define TRACE_CFI_ATTR_NUM(attr, prev, next, fmt) \
+	TRACE_CFI_ATTR(attr, prev, next, fmt, (next)->attr)
+
+#define CFI_REG_NAME_MAXLEN 16
+
+/*
+ * Return the name of a register. Note that the same static buffer
+ * is returned if the name is dynamically generated.
+ */
+static const char *cfi_reg_name(unsigned int reg)
+{
+	static char rname_buffer[CFI_REG_NAME_MAXLEN];
+	const char *rname;
+
+	switch (reg) {
+	case CFI_UNDEFINED:
+		return "<undefined>";
+	case CFI_CFA:
+		return "cfa";
+	case CFI_SP_INDIRECT:
+		return "(sp)";
+	case CFI_BP_INDIRECT:
+		return "(bp)";
+	}
+
+	if (reg < CFI_NUM_REGS) {
+		rname = arch_reg_name[reg];
+		if (rname)
+			return rname;
+	}
+
+	if (snprintf(rname_buffer, CFI_REG_NAME_MAXLEN, "r%d", reg) == -1)
+		return "<error>";
+
+	return (const char *)rname_buffer;
+}
+
+/*
+ * Functions and macros to trace changes to the CFI registers.
+ */
+
+static void trace_cfi_reg(const char *prefix, int reg, const char *fmt,
+			  int base_prev, int offset_prev,
+			  int base_next, int offset_next)
+{
+	char *rname;
+
+	if (base_prev == base_next && offset_prev == offset_next)
+		return;
+
+	if (prefix)
+		TRACE("%s:", prefix);
+
+	if (base_next == CFI_UNDEFINED) {
+		TRACE("%1$s=<undef> ", cfi_reg_name(reg));
+	} else {
+		rname = strdup(cfi_reg_name(reg));
+		TRACE(fmt, rname, cfi_reg_name(base_next), offset_next);
+		free(rname);
+	}
+}
+
+static void trace_cfi_reg_val(const char *prefix, int reg,
+			      int base_prev, int offset_prev,
+			      int base_next, int offset_next)
+{
+	trace_cfi_reg(prefix, reg, "%1$s=%2$s%3$+d ",
+		      base_prev, offset_prev, base_next, offset_next);
+}
+
+static void trace_cfi_reg_ref(const char *prefix, int reg,
+			      int base_prev, int offset_prev,
+			      int base_next, int offset_next)
+{
+	trace_cfi_reg(prefix, reg, "%1$s=(%2$s%3$+d) ",
+		      base_prev, offset_prev, base_next, offset_next);
+}
+
+#define TRACE_CFI_REG_VAL(reg, prev, next) \
+	trace_cfi_reg_val(NULL, reg, prev.base, prev.offset, \
+			  next.base, next.offset)
+
+#define TRACE_CFI_REG_REF(reg, prev, next) \
+	trace_cfi_reg_ref(NULL, reg, prev.base, prev.offset, \
+			  next.base, next.offset)
+
+void trace_insn_state(struct instruction *insn, struct insn_state *sprev,
+		      struct insn_state *snext)
+{
+	struct cfi_state *cprev, *cnext;
+	int i;
+
+	if (!memcmp(sprev, snext, sizeof(struct insn_state)))
+		return;
+
+	cprev = &sprev->cfi;
+	cnext = &snext->cfi;
+
+	disas_print_insn(stderr, objtool_disas_ctx, insn,
+			 trace_depth - 1, "state: ");
+
+	/* print register changes */
+	TRACE_CFI_REG_VAL(CFI_CFA, cprev->cfa, cnext->cfa);
+	for (i = 0; i < CFI_NUM_REGS; i++) {
+		TRACE_CFI_REG_VAL(i, cprev->vals[i], cnext->vals[i]);
+		TRACE_CFI_REG_REF(i, cprev->regs[i], cnext->regs[i]);
+	}
+
+	/* print attribute changes */
+	TRACE_CFI_ATTR_NUM(stack_size, cprev, cnext, "%d");
+	TRACE_CFI_ATTR_BOOL(drap, cprev, cnext);
+	if (cnext->drap) {
+		trace_cfi_reg_val("drap", cnext->drap_reg,
+				  cprev->drap_reg, cprev->drap_offset,
+				  cnext->drap_reg, cnext->drap_offset);
+	}
+	TRACE_CFI_ATTR_BOOL(bp_scratch, cprev, cnext);
+	TRACE_CFI_ATTR_NUM(instr, sprev, snext, "%d");
+	TRACE_CFI_ATTR_NUM(uaccess_stack, sprev, snext, "%u");
+
+	TRACE("\n");
+
+	insn->trace = 1;
+}
+
+void trace_alt_begin(struct instruction *orig_insn, struct alternative *alt,
+		     char *alt_name)
+{
+	struct instruction *alt_insn;
+	char suffix[2] = "";	/* always NUL-terminated */
+
+	alt_insn = alt->insn;
+
+	if (alt->type == ALT_TYPE_EX_TABLE) {
+		/*
+		 * When there is an exception table entry, the instruction
+		 * at the original location is executed but it can cause
+		 * an exception. In that case, execution is redirected
+		 * to the alternative instruction.
+		 *
+		 * The instruction at the original location can have
+		 * instruction alternatives, so we just print the location
+		 * of the instruction that can cause the exception and
+		 * not the instruction itself.
+		 */
+		TRACE_ALT_INFO_NOADDR(orig_insn, "/ ", "%s for instruction at 0x%lx <%s+0x%lx>",
+				      alt_name,
+				      orig_insn->offset, orig_insn->sym->name,
+				      orig_insn->offset - orig_insn->sym->offset);
+	} else {
+		TRACE_ALT_INFO_NOADDR(orig_insn, "/ ", "%s", alt_name);
+	}
+
+	if (alt->type == ALT_TYPE_JUMP_TABLE) {
+		/*
+		 * For a jump alternative, if the default instruction is
+		 * a NOP, then it is replaced with the jmp instruction,
+		 * otherwise it is replaced with a NOP instruction.
+		 */
+		trace_depth++;
+		if (orig_insn->type == INSN_NOP) {
+			suffix[0] = (orig_insn->len == 5) ? 'q' : '\0';
+			TRACE_ADDR(orig_insn, "jmp%-3s %lx <%s+0x%lx>", suffix,
+				   alt_insn->offset, alt_insn->sym->name,
+				   alt_insn->offset - alt_insn->sym->offset);
+		} else {
+			TRACE_ADDR(orig_insn, "nop%d", orig_insn->len);
+			trace_depth--;
+		}
+	}
+}
+
+void trace_alt_end(struct instruction *orig_insn, struct alternative *alt,
+		   char *alt_name)
+{
+	if (alt->type == ALT_TYPE_JUMP_TABLE && orig_insn->type == INSN_NOP)
+		trace_depth--;
+	TRACE_ALT_INFO_NOADDR(orig_insn, "\\ ", "%s", alt_name);
+}
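
A note on the formatting used by trace_cfi_reg() above: it relies on
POSIX positional printf arguments ("%1$s", "%2$s", "%3$+d"), and it
strdup()s the first cfi_reg_name() result because a second call can
overwrite the same static buffer. A minimal, standalone sketch of both
points (reg_name() below is a stand-in written for this example, not
the objtool function):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for cfi_reg_name(): returns a shared static buffer. */
    static const char *reg_name(int reg)
    {
            static char buf[16];

            snprintf(buf, sizeof(buf), "r%d", reg);
            return buf;
    }

    int main(void)
    {
            /* Copy the first name before the next call reuses the buffer. */
            char *rname = strdup(reg_name(12));

            /* Positional arguments: %1$ = name, %2$ = base, %3$ = offset. */
            fprintf(stderr, "%1$s=%2$s%3$+d ", rname, reg_name(13), -16);  /* prints "r12=r13-16 " */
            free(rname);
            return 0;
    }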