path: root/kernel
author     Linus Torvalds <torvalds@linux-foundation.org>  2025-12-02 08:01:39 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2025-12-02 08:01:39 -0800
commit     1dce50698a5ceedaca806e0a78573886a363dc95 (patch)
tree       6ce44b381ce5935c0f5546534b87666ab1140bf5 /kernel
parent     4a26e7032d7d57c998598c08a034872d6f0d3945 (diff)
parent     4322c8f81c58da493a3c46eda32f0e7534a350a0 (diff)
Merge tag 'core-uaccess-2025-11-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scoped user access updates from Thomas Gleixner:
 "Scoped user mode access and related changes:

   - Implement the missing u64 user access function on ARM when
     CONFIG_CPU_SPECTRE=n. This makes it possible to access a 64bit
     value in generic code with [unsafe_]get_user(). All other
     architectures and ARM variants provide the relevant accessors
     already.

   - Ensure that ASM GOTO jump label usage in the user mode access
     helpers always goes through a local C scope label indirection
     inside the helpers. This is required because compilers do not
     support an ASM GOTO target leaving an auto cleanup scope: GCC
     silently fails to emit the cleanup invocation and clang fails the
     build.

     [ Editor's note: gcc-16 will have fixed the code generation issue
       in commit f68fe3ddda4 ("eh: Invoke cleanups/destructors in asm
       goto jumps [PR122835]"). But we obviously have to deal with
       clang and older versions of gcc, so.. - Linus ]

     This provides generic wrapper macros and the conversion of
     affected architecture code to use them.

   - Scoped user mode access with auto cleanup

     Access to user mode memory can be required in hot code paths, but
     if it has to be done with user controlled pointers, the access is
     shielded with a speculation barrier so that the CPU cannot
     speculate around the address range check. Those speculation
     barriers impact performance quite significantly.

     This cost can be avoided by "masking" the provided pointer so it
     is guaranteed to be in the valid user memory access range and
     otherwise points to a guaranteed unpopulated address space. This
     has to be done without branches, so it creates an address
     dependency for the access which the CPU cannot speculate ahead
     of.

     This results in repetitive and error prone programming patterns:

        if (can_do_masked_user_access())
                from = masked_user_read_access_begin((from));
        else if (!user_read_access_begin(from, sizeof(*from)))
                return -EFAULT;
        unsafe_get_user(val, from, Efault);
        user_read_access_end();
        return 0;
     Efault:
        user_read_access_end();
        return -EFAULT;

     which can be replaced with scopes and automatic cleanup:

        scoped_user_read_access(from, Efault)
                unsafe_get_user(val, from, Efault);
        return 0;
     Efault:
        return -EFAULT;

   - Convert code which implements the above pattern over to
     scoped_user_*_access(). This also corrects a couple of imbalanced
     masked_*_begin() instances which are harmless on most
     architectures, but prevent PowerPC from implementing the masking
     optimization.

   - Add a missing speculation barrier in copy_from_user_iter()"

* tag 'core-uaccess-2025-11-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  lib/strn*,uaccess: Use masked_user_{read/write}_access_begin when required
  scm: Convert put_cmsg() to scoped user access
  iov_iter: Add missing speculation barrier to copy_from_user_iter()
  iov_iter: Convert copy_from_user_iter() to masked user access
  select: Convert to scoped user access
  x86/futex: Convert to scoped user access
  futex: Convert to get/put_user_inline()
  uaccess: Provide put/get_user_inline()
  uaccess: Provide scoped user access regions
  arm64: uaccess: Use unsafe wrappers for ASM GOTO
  s390/uaccess: Use unsafe wrappers for ASM GOTO
  riscv/uaccess: Use unsafe wrappers for ASM GOTO
  powerpc/uaccess: Use unsafe wrappers for ASM GOTO
  x86/uaccess: Use unsafe wrappers for ASM GOTO
  uaccess: Provide ASM GOTO safe wrappers for unsafe_*_user()
  ARM: uaccess: Implement missing __get_user_asm_dword()
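[ Editor's sketch: an illustrative-only example of how the helpers described
  above compose, based on the usage quoted in the message and the futex
  conversion below. The function names read_u32_from_user() and
  write_u32_to_user() are made up for this sketch; only
  scoped_user_read_access(), unsafe_get_user() and put_user_inline() are
  taken from the series, and their exact semantics should be checked against
  the tree. ]

        /* Hypothetical helper: single u32 read via the scoped access region */
        static inline int read_u32_from_user(u32 *dest, u32 __user *from)
        {
                u32 val;

                /* Masking/speculation barrier and access_end() are implicit */
                scoped_user_read_access(from, Efault)
                        unsafe_get_user(val, from, Efault);
                *dest = val;
                return 0;
        Efault:
                return -EFAULT;
        }

        /* Hypothetical helper: single u32 write via the inline wrapper */
        static inline int write_u32_to_user(u32 __user *to, u32 val)
        {
                /* Assumed to return 0 on success, -EFAULT on a faulting access */
                return put_user_inline(val, to);
        }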
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/futex/core.c    4
-rw-r--r--  kernel/futex/futex.h  58
2 files changed, 5 insertions(+), 57 deletions(-)
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index 2e77a6e5c865..cf7e610eac42 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -581,7 +581,7 @@ int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
 	if (flags & FLAGS_NUMA) {
 		u32 __user *naddr = (void *)uaddr + size / 2;
 
-		if (futex_get_value(&node, naddr))
+		if (get_user_inline(node, naddr))
 			return -EFAULT;
 
 		if ((node != FUTEX_NO_NODE) &&
@@ -601,7 +601,7 @@ int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
 			node = numa_node_id();
 			node_updated = true;
 		}
 
-		if (node_updated && futex_put_value(node, naddr))
+		if (node_updated && put_user_inline(node, naddr))
 			return -EFAULT;
 	}
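[ Editor's note: illustrative comparison of the calling conventions in the
  two hunks above; the standalone statements and `ret` exist only for
  exposition. The old helper took the destination by pointer, while
  get_user_inline(), like get_user(), takes it as an lvalue; both are used
  here as returning 0 on success and nonzero (-EFAULT in the caller) on a
  faulting access. ]

	u32 node;
	int ret;

	ret = futex_get_value(&node, naddr);	/* old: destination passed by pointer */
	ret = get_user_inline(node, naddr);	/* new: destination passed as an lvalue */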
diff --git a/kernel/futex/futex.h b/kernel/futex/futex.h
index 2cd57096c38e..30c2afa03889 100644
--- a/kernel/futex/futex.h
+++ b/kernel/futex/futex.h
@@ -281,63 +281,11 @@ static inline int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32
 	return ret;
 }
 
-/*
- * This does a plain atomic user space read, and the user pointer has
- * already been verified earlier by get_futex_key() to be both aligned
- * and actually in user space, just like futex_atomic_cmpxchg_inatomic().
- *
- * We still want to avoid any speculation, and while __get_user() is
- * the traditional model for this, it's actually slower than doing
- * this manually these days.
- *
- * We could just have a per-architecture special function for it,
- * the same way we do futex_atomic_cmpxchg_inatomic(), but rather
- * than force everybody to do that, write it out long-hand using
- * the low-level user-access infrastructure.
- *
- * This looks a bit overkill, but generally just results in a couple
- * of instructions.
- */
-static __always_inline int futex_get_value(u32 *dest, u32 __user *from)
-{
-	u32 val;
-
-	if (can_do_masked_user_access())
-		from = masked_user_access_begin(from);
-	else if (!user_read_access_begin(from, sizeof(*from)))
-		return -EFAULT;
-	unsafe_get_user(val, from, Efault);
-	user_read_access_end();
-	*dest = val;
-	return 0;
-Efault:
-	user_read_access_end();
-	return -EFAULT;
-}
-
-static __always_inline int futex_put_value(u32 val, u32 __user *to)
-{
-	if (can_do_masked_user_access())
-		to = masked_user_access_begin(to);
-	else if (!user_write_access_begin(to, sizeof(*to)))
-		return -EFAULT;
-	unsafe_put_user(val, to, Efault);
-	user_write_access_end();
-	return 0;
-Efault:
-	user_write_access_end();
-	return -EFAULT;
-}
-
+/* Read from user memory with pagefaults disabled */
 static inline int futex_get_value_locked(u32 *dest, u32 __user *from)
 {
-	int ret;
-
-	pagefault_disable();
-	ret = futex_get_value(dest, from);
-	pagefault_enable();
-
-	return ret;
+	guard(pagefault)();
+	return get_user_inline(*dest, from);
 }
 
 extern void __futex_unqueue(struct futex_q *q);
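[ Editor's sketch: the guard(pagefault)() conversion above relies on the
  <linux/cleanup.h> scope-guard machinery to run pagefault_enable()
  automatically when the function returns, on both the success and the
  -EFAULT path. Written out without the guard, the new body is equivalent to
  the following illustrative-only helper; the function name is made up and
  get_user_inline() is assumed to return 0 or -EFAULT as in the caller
  above. ]

	static inline int futex_get_value_locked_open_coded(u32 *dest, u32 __user *from)
	{
		int ret;

		pagefault_disable();
		ret = get_user_inline(*dest, from);	/* 0 on success, -EFAULT on fault */
		pagefault_enable();

		return ret;
	}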