author     Johannes Berg <johannes.berg@intel.com>  2025-02-10 17:09:25 +0100
committer  Johannes Berg <johannes.berg@intel.com>  2025-03-18 11:03:14 +0100
commit     d1d7f01f7cd35e16c6bcef5a0e31988b5c9980f9
tree       ac2a7b2c4ccc6a9f1749c48eca519d615d125942 /arch/um/kernel/mem.c
parent     5550187c4c21740942c32a9ae56f9f472a104cb4
um: mark rodata read-only and implement _nofault accesses
Mark read-only data actually read-only (a simple mprotect), and, to be able to test it, also implement _nofault accesses.

This works by setting up a new "segv_continue" pointer in current; when we hit a segfault, we change the signal return context so that execution continues at that address. The code using this sets it up so that it jumps to a label and aborts the access that way, returning -EFAULT.

It would be possible to optimize ___backtrack_faulted() by using asm goto (compiler version dependent) and/or gcc's &&label extension (not sure if clang has it), but in at least one attempt the && caused the compiler not to load -EFAULT into the register when jumping to the &&label from the fault handler. So leave it like this for now.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Co-developed-by: Benjamin Berg <benjamin.berg@intel.com>
Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
Link: https://patch.msgid.link/20250210160926.420133-2-benjamin@sipsolutions.net
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
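To make the mechanism concrete, here is a standalone userspace analogue of the _nofault idea (a sketch only, not code from this patch): where the patch records a "segv_continue" address in current and rewrites the signal return context, plain userspace can get the same "skip the faulting access and report -EFAULT" behaviour with sigsetjmp()/siglongjmp(). The get_int_nofault() helper name is invented for illustration.

/*
 * Userspace analogue of the _nofault access pattern: try a read that may
 * fault and turn a SIGSEGV into an -EFAULT return instead of a crash.
 */
#include <errno.h>
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf fault_jmp;

static void segv_handler(int sig)
{
	(void)sig;
	/*
	 * Resume at the sigsetjmp() call site; the saved signal mask
	 * (second sigsetjmp argument == 1) is restored, re-enabling SIGSEGV.
	 */
	siglongjmp(fault_jmp, 1);
}

/* Copy one int from *addr, or return -EFAULT if the access faults. */
static int get_int_nofault(const int *addr, int *val)
{
	if (sigsetjmp(fault_jmp, 1))
		return -EFAULT;		/* arrived here via the fault handler */
	*val = *addr;
	return 0;
}

int main(void)
{
	struct sigaction sa = { .sa_handler = segv_handler };
	int val, ok = 42;

	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	printf("valid access:   %d\n", get_int_nofault(&ok, &val));   /* 0 */
	printf("invalid access: %d\n", get_int_nofault(NULL, &val));  /* -14 (-EFAULT) */
	return 0;
}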
Diffstat (limited to 'arch/um/kernel/mem.c')
-rw-r--r--   arch/um/kernel/mem.c   10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index befed230aac2..05ffceb555b4 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -9,6 +9,8 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
+#include <linux/init.h>
+#include <asm/sections.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <as-layout.h>
@@ -241,3 +243,11 @@ static const pgprot_t protection_map[16] = {
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED
};
DECLARE_VM_GET_PAGE_PROT
+
+void mark_rodata_ro(void)
+{
+	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
+	unsigned long rodata_end = PFN_ALIGN(__end_rodata);
+
+	os_protect_memory((void *)rodata_start, rodata_end - rodata_start, 1, 0, 0);
+}
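On the os_protect_memory() call above: the commit message describes the rodata change as a simple mprotect, and UML's os_protect_memory() helper in the os-Linux layer is, as far as the host is concerned, a translation of the (r, w, x) integer flags into PROT_* bits for mprotect(). The sketch below mirrors that flag convention in a standalone program; protect_memory() is an illustrative stand-in, not the UML helper itself.

/*
 * Standalone sketch of the protection applied by mark_rodata_ro():
 * map a page, write to it, then drop it to read-only the way
 * os_protect_memory(..., 1, 0, 0) asks for (read=1, write=0, exec=0).
 */
#define _DEFAULT_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Illustrative stand-in using the same 0-or-1 flag convention. */
static int protect_memory(void *addr, unsigned long len, int r, int w, int x)
{
	int prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) |
		   (x ? PROT_EXEC : 0);

	return mprotect(addr, len, prot) < 0 ? -errno : 0;
}

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;

	strcpy(buf, "rodata");
	/* Same flags as the mark_rodata_ro() hunk: read-only, no write/exec. */
	printf("protect: %d\n", protect_memory(buf, page, 1, 0, 0));
	printf("read still works: %s\n", buf);
	/* buf[0] = 'X'; would now raise SIGSEGV */
	munmap(buf, page);
	return 0;
}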