author		Oliver Upton <oupton@kernel.org>	2025-11-24 11:01:54 -0800
committer	Oliver Upton <oupton@kernel.org>	2025-12-01 00:44:02 -0800
commit		bff8aa213dee742b09151a34494418050afed948 (patch)
tree		b2886a4aa128c23afff1e4a9cfeef2b07b588736 /arch
parent		92c6443222ca4289191d797ac79176c560886998 (diff)
KVM: arm64: Implement HW access flag management in stage-1 SW PTW
Atomically update the Access flag at stage-1 when the guest has
configured the MMU to do so. Make the implementation choice (and liberal
interpretation of speculation) that any access type updates the Access
flag, including AT and CMO instructions.

Restart the entire walk by returning to the exception-generating
instruction in the case of a failed Access flag update.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Tested-by: Marc Zyngier <maz@kernel.org>
Link: https://msgid.link/20251124190158.177318-13-oupton@kernel.org
Signed-off-by: Oliver Upton <oupton@kernel.org>
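The heart of the change is a compare-and-swap contract: the walker derives
the updated descriptor from the value it originally read, and the update
only lands if guest memory still holds that value. Below is a minimal,
self-contained C sketch of that contract; swap_desc() and set_access_flag()
are hypothetical stand-ins, and the real helper, __kvm_at_swap_desc(), is
defined outside this patch and takes a guest IPA rather than a plain
pointer.

#include <errno.h>
#include <stdint.h>
#include <stdatomic.h>

#define PTE_AF	(1ULL << 10)	/* Access flag: bit [10] of the descriptor */

/*
 * Hypothetical stand-in for __kvm_at_swap_desc(): install @new only if
 * the descriptor still reads back as @old. A lost race is reported to
 * the caller rather than retried here.
 */
static int swap_desc(_Atomic uint64_t *ptep, uint64_t old, uint64_t new)
{
	if (!atomic_compare_exchange_strong(ptep, &old, new))
		return -EAGAIN;
	return 0;
}

static int set_access_flag(_Atomic uint64_t *ptep, uint64_t desc)
{
	uint64_t new_desc = desc | PTE_AF;

	if (new_desc == desc)
		return 0;	/* AF already set, nothing to swap */

	return swap_desc(ptep, desc, new_desc);
}

Propagating the failure instead of looping in the helper mirrors the
patch's choice: a lost race means the translation changed underfoot, so
the vCPU returns to the exception-generating instruction and the entire
walk is redone.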
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/include/asm/kvm_nested.h	1
-rw-r--r--	arch/arm64/kvm/at.c	33
2 files changed, 32 insertions, 2 deletions
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 6dbc2908aed9..905c658057a4 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -353,6 +353,7 @@ struct s1_walk_info {
bool be;
bool s2;
bool pa52bit;
+ bool ha;
};
struct s1_walk_result {
diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
index 2a99380ada6f..e39f814d247f 100644
--- a/arch/arm64/kvm/at.c
+++ b/arch/arm64/kvm/at.c
@@ -346,6 +346,8 @@ static int setup_s1_walk(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
wi->baddr &= GENMASK_ULL(wi->max_oa_bits - 1, x);
+ wi->ha = tcr & TCR_HA;
+
return 0;
addrsz:
@@ -380,10 +382,24 @@ static int kvm_read_s1_desc(struct kvm_vcpu *vcpu, u64 pa, u64 *desc,
return 0;
}
+static int kvm_swap_s1_desc(struct kvm_vcpu *vcpu, u64 pa, u64 old, u64 new,
+ struct s1_walk_info *wi)
+{
+ if (wi->be) {
+ old = cpu_to_be64(old);
+ new = cpu_to_be64(new);
+ } else {
+ old = cpu_to_le64(old);
+ new = cpu_to_le64(new);
+ }
+
+ return __kvm_at_swap_desc(vcpu->kvm, pa, old, new);
+}
+
static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
struct s1_walk_result *wr, u64 va)
{
- u64 va_top, va_bottom, baddr, desc;
+ u64 va_top, va_bottom, baddr, desc, new_desc, ipa;
int level, stride, ret;
level = wi->sl;
@@ -393,7 +409,7 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
va_top = get_ia_size(wi) - 1;
while (1) {
- u64 index, ipa;
+ u64 index;
va_bottom = (3 - level) * stride + wi->pgshift;
index = (va & GENMASK_ULL(va_top, va_bottom)) >> (va_bottom - 3);
@@ -438,6 +454,8 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
return ret;
}
+ new_desc = desc;
+
/* Invalid descriptor */
if (!(desc & BIT(0)))
goto transfault;
@@ -490,6 +508,17 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
if (check_output_size(baddr & GENMASK(52, va_bottom), wi))
goto addrsz;
+ if (wi->ha)
+ new_desc |= PTE_AF;
+
+ if (new_desc != desc) {
+ ret = kvm_swap_s1_desc(vcpu, ipa, desc, new_desc, wi);
+ if (ret)
+ return ret;
+
+ desc = new_desc;
+ }
+
if (!(desc & PTE_AF)) {
fail_s1_walk(wr, ESR_ELx_FSC_ACCESS_L(level), false);
return -EACCES;
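A note on the endianness handling in kvm_swap_s1_desc() above: the
compare-and-swap operates on the raw in-memory representation of the
descriptor, so both the expected and the new value are converted to the
guest's configured data endianness (wi->be) before the swap; otherwise the
comparison could never match on a big-endian guest. A user-space
illustration of the same idea, with htobe64()/htole64() standing in for
the kernel's cpu_to_be64()/cpu_to_le64():

#include <endian.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdatomic.h>

/*
 * Convert both values to the guest's in-memory byte order before the
 * atomic compare-and-swap: the comparison is done on raw memory
 * contents, not on CPU-native values.
 */
static bool swap_guest_desc(_Atomic uint64_t *ptep, uint64_t old,
			    uint64_t new, bool guest_is_be)
{
	if (guest_is_be) {
		old = htobe64(old);
		new = htobe64(new);
	} else {
		old = htole64(old);
		new = htole64(new);
	}

	return atomic_compare_exchange_strong(ptep, &old, new);
}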