path: root/arch/x86/mm/pat/set_memory.c
author	Yu-cheng Yu <yu-cheng.yu@intel.com>	2025-06-06 13:10:35 -0400
committer	Dave Hansen <dave.hansen@linux.intel.com>	2025-08-22 07:55:21 -0700
commit	86e6815b316ec0ea8c4bb3c16a033219a52b6060 (patch)
tree	da55e95401e110e9026da49aefb1bbf8eef72d00 /arch/x86/mm/pat/set_memory.c
parent	c17b750b3ad9f45f2b6f7e6f7f4679844244f0b9 (diff)
x86/mm: Change cpa_flush() to call flush_tlb_kernel_range() directly
The function cpa_flush() calls flush_tlb_one_kernel() for each page (via __cpa_flush_tlb()) or flush_tlb_all(). Replacing that with a call to flush_tlb_kernel_range() allows cpa_flush() to make use of INVLPGB or RAR without any additional changes.

Initialize invlpgb_count_max to 1, since flush_tlb_kernel_range() can now be called before invlpgb_count_max has been initialized to the value read from CPUID.

[riel: remove now unused __cpa_flush_tlb]

Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
Signed-off-by: Rik van Riel <riel@surriel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lore.kernel.org/all/20250606171112.4013261-4-riel%40surriel.com
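For readers unfamiliar with the flush path, the sketch below models the decision that flush_tlb_kernel_range() makes on x86, which is why funneling cpa_flush() through it is enough to pick up INVLPGB or RAR later: a full TLB flush when the caller passes TLB_FLUSH_ALL or the range exceeds the single-page-flush ceiling, and a ranged flush otherwise. This is a stand-alone user-space model, not kernel code; the constants, the assumed default ceiling of 33 pages, and the printed actions are illustrative stand-ins.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's definitions. */
#define PAGE_SHIFT	12
#define TLB_FLUSH_ALL	(~0UL)

static unsigned long tlb_single_page_flush_ceiling = 33;	/* pages; assumed default */

/* Simplified model of the full-vs-ranged decision behind flush_tlb_kernel_range(). */
static void model_flush_kernel_range(unsigned long start, unsigned long end)
{
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > (tlb_single_page_flush_ceiling << PAGE_SHIFT))
		printf("full TLB flush (flush_tlb_all or broadcast invalidation)\n");
	else
		printf("ranged flush of [%#lx, %#lx)\n", start, end);
}

int main(void)
{
	unsigned long base = 0xffffffffc0000000UL;	/* arbitrary kernel-looking address */

	/* A 4-page range stays a ranged flush. */
	model_flush_kernel_range(base, base + (4UL << PAGE_SHIFT));

	/* cpa->force_flush_all sets end = TLB_FLUSH_ALL, forcing a full flush. */
	model_flush_kernel_range(base, TLB_FLUSH_ALL);

	return 0;
}

With this shape, swapping the ranged branch's implementation for an INVLPGB or RAR broadcast changes nothing for callers such as cpa_flush(), which is the point of the patch.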
Diffstat (limited to 'arch/x86/mm/pat/set_memory.c')
-rw-r--r--	arch/x86/mm/pat/set_memory.c	20
1 file changed, 7 insertions, 13 deletions
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 8834c76f91c9..d2d54b8c4dbb 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -399,15 +399,6 @@ static void cpa_flush_all(unsigned long cache)
 	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
 }
 
-static void __cpa_flush_tlb(void *data)
-{
-	struct cpa_data *cpa = data;
-	unsigned int i;
-
-	for (i = 0; i < cpa->numpages; i++)
-		flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
-}
-
 static int collapse_large_pages(unsigned long addr, struct list_head *pgtables);
 
 static void cpa_collapse_large_pages(struct cpa_data *cpa)
@@ -444,6 +435,7 @@ static void cpa_collapse_large_pages(struct cpa_data *cpa)
 
 static void cpa_flush(struct cpa_data *cpa, int cache)
 {
+	unsigned long start, end;
 	unsigned int i;
 
 	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
@@ -453,10 +445,12 @@ static void cpa_flush(struct cpa_data *cpa, int cache)
 		goto collapse_large_pages;
 	}
 
-	if (cpa->force_flush_all || cpa->numpages > tlb_single_page_flush_ceiling)
-		flush_tlb_all();
-	else
-		on_each_cpu(__cpa_flush_tlb, cpa, 1);
+	start = fix_addr(__cpa_addr(cpa, 0));
+	end = fix_addr(__cpa_addr(cpa, cpa->numpages));
+	if (cpa->force_flush_all)
+		end = TLB_FLUSH_ALL;
+
+	flush_tlb_kernel_range(start, end);
 
 	if (!cache)
 		goto collapse_large_pages;