author    Marc Zyngier <maz@kernel.org>  2023-04-21 09:30:46 +0100
committer Marc Zyngier <maz@kernel.org>  2023-04-21 09:30:46 +0100
commit    ef5f97e9de9fc0d5bb6136de3d01d78c072a452f (patch)
tree      4e5c64a8c0724b7584928a45c34241dcab0d2c0f /arch/arm64/kvm/hypercalls.c
parent    197b6b60ae7bc51dd0814953c562833143b292aa (diff)
parent    a189884bdc9238aeba941c50f02e25eb584fafed (diff)
Merge branch kvm-arm64/lock-inversion into kvmarm-master/next
* kvm-arm64/lock-inversion:
  : .
  : vm/vcpu lock inversion fixes, courtesy of Oliver Upton, plus a few
  : extra fixes from both Oliver and Reiji Watanabe.
  :
  : From the initial cover letter:
  :
  : As it so happens, lock ordering in KVM/arm64 is completely backwards.
  : There's a significant amount of VM-wide state that needs to be accessed
  : from the context of a vCPU. Until now, this was accomplished by
  : acquiring the kvm->lock, but that cannot be nested within vcpu->mutex.
  :
  : This series fixes the issue with some fine-grained locking for MP state
  : and a new, dedicated mutex that can nest with both kvm->lock and
  : vcpu->mutex.
  : .
  KVM: arm64: Have kvm_psci_vcpu_on() use WRITE_ONCE() to update mp_state
  KVM: arm64: Acquire mp_state_lock in kvm_arch_vcpu_ioctl_vcpu_init()
  KVM: arm64: vgic: Don't acquire its_lock before config_lock
  KVM: arm64: Use config_lock to protect vgic state
  KVM: arm64: Use config_lock to protect data ordered against KVM_RUN
  KVM: arm64: Avoid lock inversion when setting the VM register width
  KVM: arm64: Avoid vcpu->mutex v. kvm->lock inversion in CPU_ON

Signed-off-by: Marc Zyngier <maz@kernel.org>
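The cover letter's reasoning is easiest to see as a lock hierarchy. Below is a minimal userspace sketch (illustration only, not kernel code; kvm_lock, vcpu_mutex and config_lock stand in for kvm->lock, vcpu->mutex and kvm->arch.config_lock) of the nesting the series establishes: a vCPU-context path that already holds vcpu->mutex may take config_lock, whereas taking kvm->lock there would invert the documented order and risk deadlock against a VM-scoped path.

/*
 * Illustration only. The ordering after this series is:
 *
 *     kvm->lock -> vcpu->mutex -> kvm->arch.config_lock
 *
 * Because config_lock is ordered *after* vcpu->mutex, vCPU context can
 * take it; kvm->lock, ordered *before* vcpu->mutex, cannot be taken
 * there. Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t kvm_lock    = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t vcpu_mutex  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned long fw_reg_bmap;	/* stand-in for VM-wide state */

/* VM-scoped path: free to take the whole hierarchy, in order. */
static void vm_ioctl(void)
{
	pthread_mutex_lock(&kvm_lock);
	pthread_mutex_lock(&vcpu_mutex);
	pthread_mutex_lock(&config_lock);
	fw_reg_bmap = 0;
	pthread_mutex_unlock(&config_lock);
	pthread_mutex_unlock(&vcpu_mutex);
	pthread_mutex_unlock(&kvm_lock);
}

/* vCPU-scoped path: vcpu_mutex is already held; config_lock nests
 * safely, whereas taking kvm_lock here would invert the order. */
static void vcpu_ioctl(void)
{
	pthread_mutex_lock(&vcpu_mutex);
	pthread_mutex_lock(&config_lock);
	fw_reg_bmap |= 1;
	pthread_mutex_unlock(&config_lock);
	pthread_mutex_unlock(&vcpu_mutex);
}

int main(void)
{
	vm_ioctl();
	vcpu_ioctl();
	printf("fw_reg_bmap = %#lx\n", fw_reg_bmap);
	return 0;
}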
Diffstat (limited to 'arch/arm64/kvm/hypercalls.c')
-rw-r--r--  arch/arm64/kvm/hypercalls.c  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index 5da884e11337..fbdbf4257f76 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -377,7 +377,7 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
 	if (val & ~fw_reg_features)
 		return -EINVAL;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.config_lock);
 
 	if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) &&
 	    val != *fw_reg_bmap) {
@@ -387,7 +387,7 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
 	WRITE_ONCE(*fw_reg_bmap, val);
 
 out:
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.config_lock);
 
 	return ret;
 }
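The hunks above keep WRITE_ONCE() for the bitmap update even though the store is now serialized by config_lock, which suggests the bitmap is also read without the lock (e.g. on the hypercall fast path). A sketch of that writer-serialized, reader-lockless pattern follows, using simplified userspace stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() macros; set_bmap() and feature_enabled() are hypothetical names for illustration, not kernel functions.

/*
 * Sketch only: updates to a shared word are serialized by a mutex and
 * published with a single volatile store, so lockless readers observe
 * either the old or the new value, never a torn one. These macros
 * mimic the kernel's READ_ONCE/WRITE_ONCE for an aligned word and are
 * not the kernel implementation. Build with: cc -pthread sketch.c
 */
#include <pthread.h>

#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))

static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long fw_reg_bmap;

/* Writer side: serialized by the lock, published in one store. */
static void set_bmap(unsigned long val)
{
	pthread_mutex_lock(&config_lock);
	WRITE_ONCE(fw_reg_bmap, val);
	pthread_mutex_unlock(&config_lock);
}

/* Reader side: lockless snapshot of the current bitmap. */
static int feature_enabled(unsigned long bit)
{
	return !!(READ_ONCE(fw_reg_bmap) & (1UL << bit));
}

int main(void)
{
	set_bmap(1UL << 0);
	return feature_enabled(0) ? 0 : 1;
}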