author     Binbin Wu <binbin.wu@linux.intel.com>      2025-11-10 13:05:39 +0800
committer  Sean Christopherson <seanjc@google.com>    2025-11-19 05:41:12 -0800
commit     0b28f21ad46200fec9f8b8058692902d0de98221
tree       85a1e6f762e3c2adb150b4a40cbe61e401b8963b
parent     7649412af3eab700037feb14822d03eba484c576
KVM: x86: Add a helper to dedup loading guest/host XCR0 and XSS
Add and use a helper, kvm_load_xfeatures(), to dedup the code that loads
guest/host xfeatures. Opportunistically return early if X86_CR4_OSXSAVE
is not set to reduce indentation.

No functional change intended.

Suggested-by: Chao Gao <chao.gao@intel.com>
Reviewed-by: Chao Gao <chao.gao@intel.com>
Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Link: https://patch.msgid.link/20251110050539.3398759-1-binbin.wu@linux.intel.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
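For reference, here is the helper as it reads after this patch, reassembled
from the hunk below so it can be read without the interleaved deletions.
Everything is verbatim from the diff except the whitespace and the comments,
which are editorial glosses:

static void kvm_load_xfeatures(struct kvm_vcpu *vcpu, bool load_guest)
{
	/* Nothing to do if the guest's register state is protected. */
	if (vcpu->arch.guest_state_protected)
		return;

	/* The early return when XSAVE isn't enabled reduces indentation. */
	if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE))
		return;

	/* XCR0 and XSS are written only when guest and host values differ. */
	if (vcpu->arch.xcr0 != kvm_host.xcr0)
		xsetbv(XCR_XFEATURE_ENABLED_MASK,
		       load_guest ? vcpu->arch.xcr0 : kvm_host.xcr0);

	if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
	    vcpu->arch.ia32_xss != kvm_host.xss)
		wrmsrq(MSR_IA32_XSS, load_guest ? vcpu->arch.ia32_xss : kvm_host.xss);
}

As the two call-site hunks show, vcpu_enter_guest() passes load_guest=true
before entering the guest and load_guest=false after exiting it.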
-rw-r--r--  arch/x86/kvm/x86.c | 33 ++++++++++-----------------------
1 file changed, 10 insertions(+), 23 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1ef77a1be9b2..aff32603a043 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1205,34 +1205,21 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lmsw);
 
-static void kvm_load_guest_xfeatures(struct kvm_vcpu *vcpu)
+static void kvm_load_xfeatures(struct kvm_vcpu *vcpu, bool load_guest)
 {
 	if (vcpu->arch.guest_state_protected)
 		return;
 
-	if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
-		if (vcpu->arch.xcr0 != kvm_host.xcr0)
-			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
-
-		if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
-		    vcpu->arch.ia32_xss != kvm_host.xss)
-			wrmsrq(MSR_IA32_XSS, vcpu->arch.ia32_xss);
-	}
-}
-
-static void kvm_load_host_xfeatures(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.guest_state_protected)
+	if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE))
 		return;
 
-	if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
-		if (vcpu->arch.xcr0 != kvm_host.xcr0)
-			xsetbv(XCR_XFEATURE_ENABLED_MASK, kvm_host.xcr0);
+	if (vcpu->arch.xcr0 != kvm_host.xcr0)
+		xsetbv(XCR_XFEATURE_ENABLED_MASK,
+		       load_guest ? vcpu->arch.xcr0 : kvm_host.xcr0);
 
-		if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
-		    vcpu->arch.ia32_xss != kvm_host.xss)
-			wrmsrq(MSR_IA32_XSS, kvm_host.xss);
-	}
+	if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
+	    vcpu->arch.ia32_xss != kvm_host.xss)
+		wrmsrq(MSR_IA32_XSS, load_guest ? vcpu->arch.ia32_xss : kvm_host.xss);
 }
 
 static void kvm_load_guest_pkru(struct kvm_vcpu *vcpu)
@@ -11271,7 +11258,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.guest_fpu.xfd_err)
 		wrmsrq(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
 
-	kvm_load_guest_xfeatures(vcpu);
+	kvm_load_xfeatures(vcpu, true);
 
 	if (unlikely(vcpu->arch.switch_db_regs &&
 		     !(vcpu->arch.switch_db_regs & KVM_DEBUGREG_AUTO_SWITCH))) {
@@ -11367,7 +11354,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
 
-	kvm_load_host_xfeatures(vcpu);
+	kvm_load_xfeatures(vcpu, false);
 
 	/*
 	 * Sync xfd before calling handle_exit_irqoff() which may