author	Linus Torvalds <torvalds@linux-foundation.org>	2025-03-30 12:43:03 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2025-03-30 12:43:03 -0700
commit	fa593d0f969dcfa41d390822fdf1a0ab48cd882c (patch)
tree	f4b926a8ada7b6c38d852d3bf5d3d2dfabd3ff78 /arch/x86/net
parent	7f2ff7b6261742ed52aa973ccdf99151b7cc3a50 (diff)
parent	9aa8fe29f624610b4694d5b5695e1017c4753f31 (diff)
Merge tag 'bpf-next-6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Pull bpf updates from Alexei Starovoitov:
 "For this merge window we're splitting the BPF pull request into three
  for higher visibility: main changes, res_spin_lock, try_alloc_pages.

  These are the main BPF changes:

   - Add DFA-based live registers analysis to improve verification of
     programs with loops (Eduard Zingerman)

   - Introduce load_acquire and store_release BPF instructions and add
     x86, arm64 JIT support (Peilin Ye)

   - Fix loop detection logic in the verifier (Eduard Zingerman)

   - Drop an unnecessary lock in bpf_map_inc_not_zero() (Eric Dumazet)

   - Add a kfunc for populating cpumask bits (Emil Tsalapatis)

   - Convert various shell-based tests to the selftests/bpf/test_progs
     format (Bastien Curutchet)

   - Allow passing referenced kptrs into struct_ops callbacks (Amery
     Hung)

   - Add a flag to the LSM bpf hook to facilitate bpf program signing
     (Blaise Boscaccy)

   - Track arena arguments in kfuncs (Ihor Solodrai)

   - Add a copy_remote_vm_str() helper for reading strings from a
     remote VM and a bpf_copy_from_user_task_str() kfunc (Jordan Rome)

   - Add support for the timed may_goto instruction (Kumar Kartikeya
     Dwivedi)

   - Allow bpf_get_netns_cookie() in cgroup_skb programs (Mahe Tardy)

   - Reduce bpf_cgrp_storage_busy false positives when accessing cgroup
     local storage (Martin KaFai Lau)

   - Introduce the bpf_dynptr_copy() kfunc (Mykyta Yatsenko)

   - Allow retrieving BTF data with a BTF token (Mykyta Yatsenko)

   - Add BPF kfuncs to set and get xattrs with the 'security.bpf.'
     prefix (Song Liu)

   - Reject attaching programs to noreturn functions (Yafang Shao)

   - Introduce pre-order traversal of cgroup bpf programs (Yonghong
     Song)"

* tag 'bpf-next-6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (186 commits)
  selftests/bpf: Add selftests for load-acquire/store-release when register number is invalid
  bpf: Fix out-of-bounds read in check_atomic_load/store()
  libbpf: Add namespace for errstr making it libbpf_errstr
  bpf: Add struct_ops context information to struct bpf_prog_aux
  selftests/bpf: Sanitize pointer prior fclose()
  selftests/bpf: Migrate test_xdp_vlan.sh into test_progs
  selftests/bpf: test_xdp_vlan: Rename BPF sections
  bpf: clarify a misleading verifier error message
  selftests/bpf: Add selftest for attaching fexit to __noreturn functions
  bpf: Reject attaching fexit/fmod_ret to __noreturn functions
  bpf: Only fails the busy counter check in bpf_cgrp_storage_get if it creates storage
  bpf: Make perf_event_read_output accessible in all program types.
  bpftool: Using the right format specifiers
  bpftool: Add -Wformat-signedness flag to detect format errors
  selftests/bpf: Test freplace from user namespace
  libbpf: Pass BPF token from find_prog_btf_id to BPF_BTF_GET_FD_BY_ID
  bpf: Return prog btf_id without capable check
  bpf: BPF token support for BPF_BTF_GET_FD_BY_ID
  bpf, x86: Fix objtool warning for timed may_goto
  bpf: Check map->record at the beginning of check_and_free_fields()
  ...
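As context for the load_acquire/store_release entries above, here is a minimal sketch of how the new instructions are encoded. Both reuse the BPF_STX | BPF_ATOMIC class, with the operation selected by the imm field. The imm values (BPF_LOAD_ACQ = 0x100, BPF_STORE_REL = 0x110) are my understanding of this series; verify against include/uapi/linux/bpf.h before relying on them:

	#include <linux/bpf.h>	/* struct bpf_insn, BPF_STX, BPF_ATOMIC, ... */

	/* Assumed imm values from this series; check include/uapi/linux/bpf.h. */
	#ifndef BPF_LOAD_ACQ
	#define BPF_LOAD_ACQ	0x100
	#endif
	#ifndef BPF_STORE_REL
	#define BPF_STORE_REL	0x110
	#endif

	/* dst_reg = load_acquire((u64 *)(src_reg + off)) */
	const struct bpf_insn load_acq = {
		.code    = BPF_STX | BPF_ATOMIC | BPF_DW,
		.dst_reg = BPF_REG_0,
		.src_reg = BPF_REG_1,
		.off     = 0,
		.imm     = BPF_LOAD_ACQ,
	};

	/* store_release((u64 *)(dst_reg + off), src_reg) */
	const struct bpf_insn store_rel = {
		.code    = BPF_STX | BPF_ATOMIC | BPF_DW,
		.dst_reg = BPF_REG_1,
		.src_reg = BPF_REG_2,
		.off     = 0,
		.imm     = BPF_STORE_REL,
	};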
Diffstat (limited to 'arch/x86/net')
-rw-r--r--	arch/x86/net/Makefile	| 2
-rw-r--r--	arch/x86/net/bpf_jit_comp.c	| 100
-rw-r--r--	arch/x86/net/bpf_timed_may_goto.S	| 55
3 files changed, 143 insertions(+), 14 deletions(-)
diff --git a/arch/x86/net/Makefile b/arch/x86/net/Makefile
index 383c87300b0d..dddbefc0f439 100644
--- a/arch/x86/net/Makefile
+++ b/arch/x86/net/Makefile
@@ -6,5 +6,5 @@
ifeq ($(CONFIG_X86_32),y)
obj-$(CONFIG_BPF_JIT) += bpf_jit_comp32.o
else
- obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
+ obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o bpf_timed_may_goto.o
endif
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 72776dcb75aa..9e5fe2ba858f 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1250,8 +1250,8 @@ static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm)
emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm);
}
-static int emit_atomic(u8 **pprog, u8 atomic_op,
- u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
+static int emit_atomic_rmw(u8 **pprog, u32 atomic_op,
+ u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
u8 *prog = *pprog;
@@ -1291,8 +1291,9 @@ static int emit_atomic(u8 **pprog, u8 atomic_op,
return 0;
}
-static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
- u32 dst_reg, u32 src_reg, u32 index_reg, int off)
+static int emit_atomic_rmw_index(u8 **pprog, u32 atomic_op, u32 size,
+ u32 dst_reg, u32 src_reg, u32 index_reg,
+ int off)
{
u8 *prog = *pprog;
@@ -1305,7 +1306,7 @@ static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg));
break;
default:
- pr_err("bpf_jit: 1 and 2 byte atomics are not supported\n");
+ pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
return -EFAULT;
}
@@ -1339,6 +1340,49 @@ static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
return 0;
}
+static int emit_atomic_ld_st(u8 **pprog, u32 atomic_op, u32 dst_reg,
+ u32 src_reg, s16 off, u8 bpf_size)
+{
+ switch (atomic_op) {
+ case BPF_LOAD_ACQ:
+ /* dst_reg = smp_load_acquire(src_reg + off16) */
+ emit_ldx(pprog, bpf_size, dst_reg, src_reg, off);
+ break;
+ case BPF_STORE_REL:
+ /* smp_store_release(dst_reg + off16, src_reg) */
+ emit_stx(pprog, bpf_size, dst_reg, src_reg, off);
+ break;
+ default:
+ pr_err("bpf_jit: unknown atomic load/store opcode %02x\n",
+ atomic_op);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int emit_atomic_ld_st_index(u8 **pprog, u32 atomic_op, u32 size,
+ u32 dst_reg, u32 src_reg, u32 index_reg,
+ int off)
+{
+ switch (atomic_op) {
+ case BPF_LOAD_ACQ:
+ /* dst_reg = smp_load_acquire(src_reg + idx_reg + off16) */
+ emit_ldx_index(pprog, size, dst_reg, src_reg, index_reg, off);
+ break;
+ case BPF_STORE_REL:
+ /* smp_store_release(dst_reg + idx_reg + off16, src_reg) */
+ emit_stx_index(pprog, size, dst_reg, src_reg, index_reg, off);
+ break;
+ default:
+ pr_err("bpf_jit: unknown atomic load/store opcode %02x\n",
+ atomic_op);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
#define DONT_CLEAR 1
bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
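The two helpers added above lean on x86-64's TSO memory model: an ordinary MOV load already has acquire semantics and an ordinary MOV store already has release semantics, so emit_ldx()/emit_stx() suffice and no LOCK prefix or fence is emitted. A minimal sketch of the equivalent C-side pattern, mirroring what the generic kernel helpers reduce to on x86 (see arch/x86/include/asm/barrier.h); the my_* names are illustrative, not kernel API:

	/*
	 * Sketch only: on x86-64, smp_load_acquire()/smp_store_release()
	 * collapse to a volatile access plus a compiler-only barrier.
	 */
	#define my_load_acquire(p)					\
	({								\
		typeof(*(p)) ___v = READ_ONCE(*(p));			\
		barrier();	/* compiler barrier; no fence on TSO */	\
		___v;							\
	})

	#define my_store_release(p, v)					\
	do {								\
		barrier();	/* compiler barrier; no fence on TSO */	\
		WRITE_ONCE(*(p), (v));					\
	} while (0)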
@@ -2121,6 +2165,13 @@ populate_extable:
}
break;
+ case BPF_STX | BPF_ATOMIC | BPF_B:
+ case BPF_STX | BPF_ATOMIC | BPF_H:
+ if (!bpf_atomic_is_load_store(insn)) {
+ pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
+ return -EFAULT;
+ }
+ fallthrough;
case BPF_STX | BPF_ATOMIC | BPF_W:
case BPF_STX | BPF_ATOMIC | BPF_DW:
if (insn->imm == (BPF_AND | BPF_FETCH) ||
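The new BPF_B/BPF_H cases above are accepted only for the load/store forms; 1- and 2-byte RMW atomics remain unsupported by this JIT. bpf_atomic_is_load_store() comes from the series' common code; presumably it is along these lines (a sketch, not the verified kernel definition, check include/linux/bpf.h):

	/* Load-acquire/store-release are the only BPF_ATOMIC modes that
	 * are plain loads/stores rather than read-modify-writes.
	 */
	static inline bool bpf_atomic_is_load_store(const struct bpf_insn *insn)
	{
		const s32 imm = insn->imm;

		return imm == BPF_LOAD_ACQ || imm == BPF_STORE_REL;
	}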
@@ -2156,10 +2207,10 @@ populate_extable:
EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
add_2reg(0xC0, AUX_REG, real_src_reg));
/* Attempt to swap in new value */
- err = emit_atomic(&prog, BPF_CMPXCHG,
- real_dst_reg, AUX_REG,
- insn->off,
- BPF_SIZE(insn->code));
+ err = emit_atomic_rmw(&prog, BPF_CMPXCHG,
+ real_dst_reg, AUX_REG,
+ insn->off,
+ BPF_SIZE(insn->code));
if (WARN_ON(err))
return err;
/*
@@ -2174,17 +2225,35 @@ populate_extable:
break;
}
- err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
- insn->off, BPF_SIZE(insn->code));
+ if (bpf_atomic_is_load_store(insn))
+ err = emit_atomic_ld_st(&prog, insn->imm, dst_reg, src_reg,
+ insn->off, BPF_SIZE(insn->code));
+ else
+ err = emit_atomic_rmw(&prog, insn->imm, dst_reg, src_reg,
+ insn->off, BPF_SIZE(insn->code));
if (err)
return err;
break;
+ case BPF_STX | BPF_PROBE_ATOMIC | BPF_B:
+ case BPF_STX | BPF_PROBE_ATOMIC | BPF_H:
+ if (!bpf_atomic_is_load_store(insn)) {
+ pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
+ return -EFAULT;
+ }
+ fallthrough;
case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
start_of_ldx = prog;
- err = emit_atomic_index(&prog, insn->imm, BPF_SIZE(insn->code),
- dst_reg, src_reg, X86_REG_R12, insn->off);
+
+ if (bpf_atomic_is_load_store(insn))
+ err = emit_atomic_ld_st_index(&prog, insn->imm,
+ BPF_SIZE(insn->code), dst_reg,
+ src_reg, X86_REG_R12, insn->off);
+ else
+ err = emit_atomic_rmw_index(&prog, insn->imm, BPF_SIZE(insn->code),
+ dst_reg, src_reg, X86_REG_R12,
+ insn->off);
if (err)
return err;
goto populate_extable;
@@ -3801,3 +3870,8 @@ u64 bpf_arch_uaddress_limit(void)
{
return 0;
}
+
+bool bpf_jit_supports_timed_may_goto(void)
+{
+ return true;
+}
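Returning true here opts the x86-64 JIT into the timed may_goto scheme: instead of a bare decrementing loop counter, the verifier's may_goto rewrite keeps a count plus a timestamp in the program's stack frame and periodically calls into arch_bpf_timed_may_goto() (added below) to refresh them. A sketch of the per-frame state as I understand it from the series; the real definition lives in the common BPF headers:

	/* Sketch, assuming the layout from the timed may_goto series: both
	 * fields live in the BPF program's stack frame, and r10 (see the
	 * assembly below) carries the frame offset of this pair.
	 */
	struct bpf_timed_may_goto {
		u64 count;	/* remaining iterations before the next check */
		u64 timestamp;	/* when this time slice started */
	};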
diff --git a/arch/x86/net/bpf_timed_may_goto.S b/arch/x86/net/bpf_timed_may_goto.S
new file mode 100644
index 000000000000..54c690cae190
--- /dev/null
+++ b/arch/x86/net/bpf_timed_may_goto.S
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/export.h>
+#include <linux/linkage.h>
+#include <asm/nospec-branch.h>
+
+ .code64
+ .section .text, "ax"
+
+SYM_FUNC_START(arch_bpf_timed_may_goto)
+ ANNOTATE_NOENDBR
+
+ /*
+ * r10 passes us stack depth, load the pointer to count and timestamp
+ * into r10 by adding it to BPF frame pointer.
+ */
+ leaq (%rbp, %r10, 1), %r10
+
+ /* Setup frame. */
+ pushq %rbp
+ movq %rsp, %rbp
+
+ /* Save r0-r5. */
+ pushq %rax
+ pushq %rdi
+ pushq %rsi
+ pushq %rdx
+ pushq %rcx
+ pushq %r8
+
+ /*
+ * r10 has the pointer to count and timestamp, pass it as first
+ * argument.
+ */
+ movq %r10, %rdi
+
+ /* Emit call depth accounting for call below. */
+ CALL_DEPTH_ACCOUNT
+ call bpf_check_timed_may_goto
+
+ /* BPF_REG_AX=r10 will be stored into count, so move return value to it. */
+ movq %rax, %r10
+
+ /* Restore r5-r0. */
+ popq %r8
+ popq %rcx
+ popq %rdx
+ popq %rsi
+ popq %rdi
+ popq %rax
+
+ leave
+ RET
+SYM_FUNC_END(arch_bpf_timed_may_goto)
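The trampoline deliberately uses a bare-bones convention: it preserves all BPF caller-saved registers itself, takes the count/timestamp pointer via a scratch register (r10) rather than the normal argument path, and returns the refreshed count in r10, which the JIT maps to BPF_REG_AX. The C helper it calls is paraphrased below from kernel/bpf/core.c as I understand the series; the exact constants (iteration budget, 250 ms slice) are assumptions to check against the merged code:

	/* Hedged sketch of bpf_check_timed_may_goto(): returns the new loop
	 * budget, or 0 once this program has used up its time slice.
	 * BPF_MAX_TIMED_LOOPS and the NSEC_PER_SEC / 4 slice are assumptions.
	 */
	u64 bpf_check_timed_may_goto(struct bpf_timed_may_goto *p)
	{
		u64 time = ktime_get_mono_fast_ns();

		/* First call in this frame: start the slice, grant a full budget. */
		if (!p->timestamp) {
			p->timestamp = time;
			return BPF_MAX_TIMED_LOOPS;
		}
		/* Slice exhausted: return 0 so the may_goto branch exits the loop. */
		if (time - p->timestamp >= (NSEC_PER_SEC / 4))
			return 0;
		/* Otherwise refresh the budget and keep looping. */
		return BPF_MAX_TIMED_LOOPS;
	}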