author		Benjamin Berg <benjamin.berg@intel.com>		2024-07-03 15:45:36 +0200
committer	Johannes Berg <johannes.berg@intel.com>		2024-07-03 17:09:50 +0200
commit		bcf3d957c63d8b6d718b862fea18c5f14ce803e2 (patch)
tree		607887b1ddcbee5522d4066df731ba739bbaa26a /arch/um/include/asm/pgtable.h
parent		573a446fc8ea4ca9be60b1db2091297da48d0a0d (diff)
um: refactor TLB update handling
Conceptually, we want the memory mappings to always be up to date and represent whatever is in the TLB. To ensure that, we need to sync them over in the userspace case and for the kernel we need to process the mappings.

The kernel will call flush_tlb_* if page table entries that were valid before become invalid. Unfortunately, this is not the case if entries are added.

As such, change both flush_tlb_* and set_ptes to track the memory range that has to be synchronized. For the kernel, we need to execute a flush_tlb_kern_* immediately but we can wait for the first page fault in case of set_ptes. For userspace in contrast we only store that a range of memory needs to be synced and do so whenever we switch to that process.

Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
Link: https://patch.msgid.link/20240703134536.1161108-13-benjamin@sipsolutions.net
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
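[Editor's note] The scheme described above reduces to keeping one pending [from, to) window per mm: every set_ptes or flush_tlb_* call only widens the window, and the actual sync happens later (on the first kernel fault, or when switching to the userspace process). The standalone C sketch below models that accumulate-then-sync behaviour; only the sync_tlb_range_from/sync_tlb_range_to field names come from the patch, while the struct, helper names and the printf stand-in for the real sync are purely illustrative and not kernel code.

#include <stdio.h>

/*
 * Illustrative model of the deferred TLB sync: callers only record the
 * range that changed, and a later event consumes the accumulated range.
 */
struct mm_context_sketch {
	unsigned long sync_tlb_range_from;
	unsigned long sync_tlb_range_to;	/* 0 means nothing is pending */
};

/* Widen the pending range to cover [start, end), like um_tlb_mark_sync(). */
static void mark_sync(struct mm_context_sketch *ctx,
		      unsigned long start, unsigned long end)
{
	if (!ctx->sync_tlb_range_to) {
		ctx->sync_tlb_range_from = start;
		ctx->sync_tlb_range_to = end;
	} else {
		if (start < ctx->sync_tlb_range_from)
			ctx->sync_tlb_range_from = start;
		if (end > ctx->sync_tlb_range_to)
			ctx->sync_tlb_range_to = end;
	}
}

/* Consume the pending range, e.g. on the first fault or a process switch. */
static void sync_pending(struct mm_context_sketch *ctx)
{
	if (!ctx->sync_tlb_range_to)
		return;		/* nothing marked since the last sync */

	printf("sync mappings for [%#lx, %#lx)\n",
	       ctx->sync_tlb_range_from, ctx->sync_tlb_range_to);
	ctx->sync_tlb_range_from = 0;
	ctx->sync_tlb_range_to = 0;
}

int main(void)
{
	struct mm_context_sketch ctx = { 0, 0 };

	mark_sync(&ctx, 0x1000, 0x3000);	/* e.g. a set_ptes() call       */
	mark_sync(&ctx, 0x2000, 0x5000);	/* an overlapping flush_tlb_*   */
	sync_pending(&ctx);			/* one combined sync of the range */
	return 0;
}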
Diffstat (limited to 'arch/um/include/asm/pgtable.h')
-rw-r--r--	arch/um/include/asm/pgtable.h	32
1 file changed, 32 insertions(+), 0 deletions(-)
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index e1ece21dbe3f..5bb397b65efb 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -244,6 +244,38 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
#define PFN_PTE_SHIFT PAGE_SHIFT
+static inline void um_tlb_mark_sync(struct mm_struct *mm, unsigned long start,
+				     unsigned long end)
+{
+	if (!mm->context.sync_tlb_range_to) {
+		mm->context.sync_tlb_range_from = start;
+		mm->context.sync_tlb_range_to = end;
+	} else {
+		if (start < mm->context.sync_tlb_range_from)
+			mm->context.sync_tlb_range_from = start;
+		if (end > mm->context.sync_tlb_range_to)
+			mm->context.sync_tlb_range_to = end;
+	}
+}
+
+#define set_ptes set_ptes
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+			    pte_t *ptep, pte_t pte, int nr)
+{
+	/* Basically the default implementation */
+	size_t length = nr * PAGE_SIZE;
+
+	for (;;) {
+		set_pte(ptep, pte);
+		if (--nr == 0)
+			break;
+		ptep++;
+		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
+	}
+
+	um_tlb_mark_sync(mm, addr, addr + length);
+}
+
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{