From aba7de6088be5a3b5d766c5f7fdb5d0790ff8f13 Mon Sep 17 00:00:00 2001
From: Pawan Gupta
Date: Thu, 13 Nov 2025 15:37:39 -0800
Subject: x86/bugs: Use VM_CLEAR_CPU_BUFFERS in VMX as well

The TSA mitigation, commit d8010d4ba43e ("x86/bugs: Add a Transient
Scheduler Attacks mitigation"), introduced VM_CLEAR_CPU_BUFFERS for
guests on AMD CPUs. On Intel, CLEAR_CPU_BUFFERS is currently used for
guests, which has a much broader scope (it also covers kernel->user
transitions). Make the mitigations on Intel consistent with TSA. This
will help handle guest-only mitigations better in the future.

Signed-off-by: Pawan Gupta
[sean: make CLEAR_CPU_BUF_VM mutually exclusive with the MMIO mitigation]
Acked-by: Borislav Petkov (AMD)
Reviewed-by: Brendan Jackman
Link: https://patch.msgid.link/20251113233746.1703361-3-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kernel/cpu/bugs.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 6a526ae1fe99..2847e11fbab5 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -194,7 +194,7 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
 
 /*
  * Controls CPU Fill buffer clear before VMenter. This is a subset of
- * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
+ * X86_FEATURE_CLEAR_CPU_BUF_VM, and should only be enabled when KVM-only
  * mitigation is required.
  */
 DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
@@ -489,8 +489,8 @@ static enum rfds_mitigations rfds_mitigation __ro_after_init =
 	IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF;
 
 /*
- * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing
- * through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry.
+ * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing on exit to
+ * userspace *and* on entry to KVM guests.
  */
 static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init;
@@ -536,6 +536,7 @@ static void __init mds_apply_mitigation(void)
 	if (mds_mitigation == MDS_MITIGATION_FULL ||
 	    mds_mitigation == MDS_MITIGATION_VMWERV) {
 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
 		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
 		    (mds_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
 			cpu_smt_disable(false);
@@ -647,6 +648,7 @@ static void __init taa_apply_mitigation(void)
 	 * present on host, enable the mitigation for UCODE_NEEDED as well.
 	 */
 	setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+	setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
 
 	if (taa_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
 		cpu_smt_disable(false);
@@ -748,6 +750,7 @@ static void __init mmio_apply_mitigation(void)
 	 */
 	if (verw_clear_cpu_buf_mitigation_selected) {
 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
 		static_branch_disable(&cpu_buf_vm_clear);
 	} else {
 		static_branch_enable(&cpu_buf_vm_clear);
@@ -839,8 +842,10 @@ static void __init rfds_update_mitigation(void)
 
 static void __init rfds_apply_mitigation(void)
 {
-	if (rfds_mitigation == RFDS_MITIGATION_VERW)
+	if (rfds_mitigation == RFDS_MITIGATION_VERW) {
 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
+	}
 }
 
 static __init int rfds_parse_cmdline(char *str)
--
cgit v1.2.3
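For orientation between the two patches: CLEAR_CPU_BUFFERS and
VM_CLEAR_CPU_BUFFERS emit the same VERW sequence and differ only in the
feature bit that gates the boot-time alternatives patching. A simplified
sketch of the pattern follows; the exact definitions in
arch/x86/include/asm/nospec-branch.h differ in detail.

/*
 * Sketch only, not the verbatim kernel macros.  VERW with the kernel's
 * dedicated selector flushes the affected CPU buffers as a microcode
 * side effect.
 */
#define __CLEAR_CPU_BUFFERS	verw x86_verw_sel

/* Exit-to-userspace paths: gated by the broad mitigation bit. */
#define CLEAR_CPU_BUFFERS \
	ALTERNATIVE "", __CLEAR_CPU_BUFFERS, X86_FEATURE_CLEAR_CPU_BUF

/* VM-Enter paths only: gated by the narrower guest-only bit. */
#define VM_CLEAR_CPU_BUFFERS \
	ALTERNATIVE "", __CLEAR_CPU_BUFFERS, X86_FEATURE_CLEAR_CPU_BUF_VM

After the patch above, every Intel path that forces
X86_FEATURE_CLEAR_CPU_BUF also forces X86_FEATURE_CLEAR_CPU_BUF_VM, so
VM-Enter can rely solely on VM_CLEAR_CPU_BUFFERS, as TSA already does on
AMD.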
From f6106d41ec84e552a5e8adda1f8741cab96a5425 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Thu, 13 Nov 2025 15:37:41 -0800
Subject: x86/bugs: Use an x86 feature to track the MMIO Stale Data mitigation

Convert the MMIO Stale Data mitigation tracking from a static branch into
an x86 feature flag so that it can be used via ALTERNATIVE_2 in KVM.

No functional change intended.

Reviewed-by: Pawan Gupta
Reviewed-by: Brendan Jackman
Link: https://patch.msgid.link/20251113233746.1703361-5-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/include/asm/cpufeatures.h   |  5 +++++
 arch/x86/include/asm/nospec-branch.h |  2 --
 arch/x86/kernel/cpu/bugs.c           | 11 +----------
 arch/x86/kvm/mmu/spte.c              |  2 +-
 arch/x86/kvm/vmx/vmx.c               |  4 ++--
 5 files changed, 9 insertions(+), 15 deletions(-)

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 4091a776e37a..fc5698844a0b 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -499,6 +499,11 @@
 #define X86_FEATURE_IBPB_EXIT_TO_USER	(21*32+14) /* Use IBPB on exit-to-userspace, see VMSCAPE bug */
 #define X86_FEATURE_ABMC		(21*32+15) /* Assignable Bandwidth Monitoring Counters */
 #define X86_FEATURE_MSR_IMM		(21*32+16) /* MSR immediate form instructions */
+#define X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO (21*32+17) /*
+						      * Clear CPU buffers before VM-Enter if the vCPU
+						      * can access host MMIO (ignored for all intents
+						      * and purposes if CLEAR_CPU_BUF_VM is set).
+						      */
 
 /*
  * BUG word(s)
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index cb36a8ea00d3..afdcdf40e414 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -585,8 +585,6 @@ DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
 
 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
 
-DECLARE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
-
 extern u16 x86_verw_sel;
 
 #include <asm/segment.h>
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 2847e11fbab5..8391a20fe5a8 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -192,14 +192,6 @@ EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
  */
 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
 
-/*
- * Controls CPU Fill buffer clear before VMenter. This is a subset of
- * X86_FEATURE_CLEAR_CPU_BUF_VM, and should only be enabled when KVM-only
- * mitigation is required.
- */
-DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
-EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);
-
 #undef pr_fmt
 #define pr_fmt(fmt)	"mitigations: " fmt
 
@@ -751,9 +743,8 @@ static void __init mmio_apply_mitigation(void)
 	if (verw_clear_cpu_buf_mitigation_selected) {
 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
-		static_branch_disable(&cpu_buf_vm_clear);
 	} else {
-		static_branch_enable(&cpu_buf_vm_clear);
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO);
 	}
 
 	/*
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 37647afde7d3..85a0473809b0 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -292,7 +292,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
 	}
 
-	if (static_branch_unlikely(&cpu_buf_vm_clear) &&
+	if (cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO) &&
 	    !kvm_vcpu_can_access_host_mmio(vcpu) &&
 	    kvm_is_mmio_pfn(pfn, &is_host_mmio))
 		kvm_track_host_mmio_mapping(vcpu);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index d98107a7bdaa..67702609f68e 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -903,7 +903,7 @@ unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
 	if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
 		flags |= VMX_RUN_SAVE_SPEC_CTRL;
 
-	if (static_branch_unlikely(&cpu_buf_vm_clear) &&
+	if (cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO) &&
 	    kvm_vcpu_can_access_host_mmio(&vmx->vcpu))
 		flags |= VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO;
 
@@ -7325,7 +7325,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 	 */
 	if (static_branch_unlikely(&vmx_l1d_should_flush))
 		vmx_l1d_flush(vcpu);
-	else if (static_branch_unlikely(&cpu_buf_vm_clear) &&
+	else if (cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO) &&
 		 (flags & VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO))
 		x86_clear_cpu_buffers();
 
--
cgit v1.2.3
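For background on what the feature-flag conversion enables: a static key
can only be tested from C, while an X86_FEATURE_* bit also feeds the
boot-time alternatives patching usable from assembly and inline asm. A
hypothetical sketch of an ALTERNATIVE_2 user follows; the helper name,
operands, and call site are illustrative assumptions, not the actual KVM
code from this series.

/*
 * Hypothetical sketch: one site patched three ways at boot from the two
 * feature bits.  With ALTERNATIVE_2 the later feature takes precedence,
 * so unconditional clearing (CLEAR_CPU_BUF_VM) wins over the MMIO-only
 * variant.  A real MMIO-only path would additionally need the runtime
 * VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO check seen in vmx.c above; that is
 * elided here.
 */
static __always_inline void vm_clear_cpu_buffers(void)
{
	asm volatile(ALTERNATIVE_2("", /* CPU not affected: no-op */
				   "verw %[sel]", X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO,
				   "verw %[sel]", X86_FEATURE_CLEAR_CPU_BUF_VM)
		     : : [sel] "m" (x86_verw_sel) : "cc");
}

In C code the new flag is simply queried with cpu_feature_enabled(), as
the spte.c and vmx.c hunks above show; the static key and its export go
away entirely.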