| field | value | date |
|---|---|---|
| author | Oliver Upton <oliver.upton@linux.dev> | 2025-07-08 10:25:11 -0700 |
| committer | Oliver Upton <oliver.upton@linux.dev> | 2025-07-08 11:36:31 -0700 |
| commit | 77ee70a073575977b403e9add25f185d614217d8 | |
| tree | 8a727a6e591ab61cc26230a9d8cd309ececbd642 /arch/arm64/include/asm/kvm_emulate.h | |
| parent | 9aba641b9ec2a9f443a6c666c054c5e98ef550b5 | |
KVM: arm64: nv: Honor SError exception routing / masking
To date KVM has used HCR_EL2.VSE to track the state of a pending SError
for the guest. With this bit set, hardware respects the EL1 exception
routing / masking rules and injects the vSError when appropriate.
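For context, the VSE-based scheme amounts to little more than pending the bit in the shadow HCR_EL2 value. A minimal sketch, assuming the `hcr_el2` field in `struct kvm_vcpu_arch` and the `HCR_VSE` definition from `asm/kvm_arm.h`; the function name is illustrative, not the kernel's exact code:

```c
#include <asm/kvm_arm.h>	/* HCR_VSE */
#include <asm/kvm_host.h>	/* struct kvm_vcpu */

/*
 * Sketch of the legacy path: pend a virtual SError by setting
 * HCR_EL2.VSE in the shadow value applied on guest entry; hardware
 * then delivers the vSError once PSTATE.A permits at EL1.
 */
static void sketch_pend_vserror(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_VSE;
}
```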
This isn't correct for NV guests, as hardware is oblivious to vEL2's
intentions for SErrors. Worse yet, with FEAT_NV2 the guest can change
the routing behind our back, as HCR_EL2 is redirected to memory. Cope
with this mess by:
- Using a flag (instead of HCR_EL2.VSE) to track the pending SError
state when SErrors are unconditionally masked for the current context
- Resampling the routing / masking of a pending SError on every guest
entry/exit (see the sketch after this list)
- Emulating exception entry when SError routing implies a translation
regime change
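A rough sketch of how these pieces could fit together on entry/exit. Only `vcpu_el2_amo_is_set()`, `is_hyp_ctxt()` (both in the diff below), `vcpu_has_nv()`, `kvm_inject_nested_serror()`, and `vsesr_el2` come from this series or existing code; the flag name and helper are hypothetical:

```c
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>

/*
 * Hedged sketch of the resampling step; PENDING_VSERROR and this
 * helper are illustrative names, not the kernel's.
 */
static void sketch_resample_vserror(struct kvm_vcpu *vcpu)
{
	if (!vcpu_get_flag(vcpu, PENDING_VSERROR))	/* hypothetical flag */
		return;

	/*
	 * SError routed to vEL2 (guest hypervisor set HCR_EL2.AMO) while
	 * running at vEL1: delivery implies a translation regime change,
	 * so emulate the exception entry rather than using HCR_EL2.VSE.
	 */
	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu) && vcpu_el2_amo_is_set(vcpu)) {
		kvm_inject_nested_serror(vcpu, vcpu->arch.vsesr_el2);
		return;
	}

	/*
	 * Same regime: fall back to HCR_EL2.VSE and let hardware inject
	 * once SErrors are unmasked (flag clearing elided here).
	 */
	if (!(*vcpu_cpsr(vcpu) & PSR_A_BIT))
		vcpu->arch.hcr_el2 |= HCR_VSE;
}
```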
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250708172532.1699409-7-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Diffstat (limited to 'arch/arm64/include/asm/kvm_emulate.h')
| -rw-r--r-- | arch/arm64/include/asm/kvm_emulate.h | 20 |
1 file changed, 19 insertions(+), 1 deletion(-)
```diff
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 3a27ed4de9ac..daa0410aaebf 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -45,7 +45,7 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
 void kvm_skip_instr32(struct kvm_vcpu *vcpu);
 
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
-void kvm_inject_vabt(struct kvm_vcpu *vcpu);
+int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr);
 int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
 void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
 
@@ -59,12 +59,25 @@ static inline int kvm_inject_sea_iabt(struct kvm_vcpu *vcpu, u64 addr)
 	return kvm_inject_sea(vcpu, true, addr);
 }
 
+static inline int kvm_inject_serror(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * ESR_ELx.ISV (later renamed to IDS) indicates whether or not
+	 * ESR_ELx.ISS contains IMPLEMENTATION DEFINED syndrome information.
+	 *
+	 * Set the bit when injecting an SError w/o an ESR to indicate ISS
+	 * does not follow the architected format.
+	 */
+	return kvm_inject_serror_esr(vcpu, ESR_ELx_ISV);
+}
+
 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
 
 void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
 int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
 int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
 int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
+int kvm_inject_nested_serror(struct kvm_vcpu *vcpu, u64 esr);
 
 static inline void kvm_inject_nested_sve_trap(struct kvm_vcpu *vcpu)
 {
@@ -205,6 +218,11 @@ static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
 	return ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2) & HCR_TGE;
 }
 
+static inline bool vcpu_el2_amo_is_set(const struct kvm_vcpu *vcpu)
+{
+	return ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2) & HCR_AMO;
+}
+
 static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
 {
 	bool e2h, tge;
```
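To round this off, a hedged example of how a caller might use the reworked helpers declared above. The function here is illustrative; the real call sites are converted elsewhere in the series:

```c
/*
 * An SError with a valid architected syndrome passes it through;
 * one without uses kvm_inject_serror(), which sets ESR_ELx.ISV (IDS)
 * to mark the ISS as IMPLEMENTATION DEFINED.
 */
static int sketch_forward_serror(struct kvm_vcpu *vcpu, bool have_esr, u64 esr)
{
	if (have_esr)
		return kvm_inject_serror_esr(vcpu, esr);

	return kvm_inject_serror(vcpu);
}
```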