path: root/arch/arm64/kvm/config.c
author     Martin K. Petersen <martin.petersen@oracle.com>  2025-11-20 10:26:13 -0500
committer  Martin K. Petersen <martin.petersen@oracle.com>  2025-11-20 10:26:13 -0500
commit     82f78acd5a9270370ef4aa3f032ede25f3dc91ee (patch)
tree       a0aa5f6ed6b9af6e270557ecdea2e2839c1ed2e2 /arch/arm64/kvm/config.c
parent     38725491e7665640545c8155db53a7b21bcdf886 (diff)
parent     b06b8c421485e0e96d7fd6aa614fb0b6f2778a03 (diff)
Merge patch series "Add OP-TEE based RPMB driver for UFS devices"
Bean Huo <beanhuo@iokpp.de> says:

This patch series introduces OP-TEE based RPMB (Replay Protected Memory Block) support for UFS devices, extending the kernel-level secure storage capabilities that are currently available for eMMC devices.

Previously, OP-TEE required a userspace supplicant to access RPMB partitions, which created complex dependencies and reliability issues, especially during early boot scenarios. Recent work by Linaro has moved core supplicant functionality directly into the Linux kernel for eMMC devices, eliminating userspace dependencies and enabling immediate secure storage access. This series extends the same approach to UFS devices, which are used in enterprise and mobile applications that require secure storage capabilities.

Benefits:

 - Eliminates the dependency on a userspace supplicant for UFS RPMB access
 - Enables early boot secure storage access (e.g., fTPM, secure UEFI variables)
 - Provides kernel-level RPMB access as soon as the UFS driver is initialized
 - Removes complex initramfs dependencies and boot ordering requirements
 - Ensures reliable and deterministic secure storage operations
 - Supports both built-in and modular fTPM configurations

Prerequisites:
--------------
This patch series depends on commit 7e8242405b94 ("rpmb: move struct rpmb_frame to common header"), which has been merged into mainline as of v6.18-rc2.

Link: https://patch.msgid.link/20251107230518.4060231-1-beanhuo@iokpp.de
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
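The kernel-side plumbing such a series builds on is the rpmb-core layer (include/linux/rpmb.h), where a storage driver registers a descriptor and a frame-routing callback that in-kernel users (such as the OP-TEE driver) can then call directly. The sketch below is a minimal, hypothetical illustration of that registration path for a UFS host; ufs_rpmb_route_frames(), ufs_rpmb_register(), and the descriptor values are invented for illustration and are not taken from this series.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/rpmb.h>

/*
 * Hypothetical glue: how a UFS host driver could expose its RPMB
 * well-known LUN through rpmb-core. The actual SECURITY PROTOCOL
 * IN/OUT plumbing and error handling are elided.
 */
static int ufs_rpmb_route_frames(struct device *dev, u8 *req,
                                 unsigned int req_len, u8 *resp,
                                 unsigned int resp_len)
{
        /* Would translate RPMB frames into UFS security protocol commands. */
        return -EOPNOTSUPP; /* placeholder */
}

static int ufs_rpmb_register(struct device *dev, u8 *unique_id, size_t id_len)
{
        struct rpmb_descr descr = {
                .type = RPMB_TYPE_UFS,
                .route_frames = ufs_rpmb_route_frames,
                .dev_id = unique_id,            /* device serial number */
                .dev_id_len = id_len,
                .reliable_wr_count = 1,         /* device specific */
                .capacity = 16,                 /* device specific, RPMB units */
        };
        struct rpmb_dev *rdev = rpmb_dev_register(dev, &descr);

        return PTR_ERR_OR_ZERO(rdev);
}

With a device registered this way, kernel code can route RPMB frames without a userspace supplicant, which is what enables the early-boot fTPM and secure UEFI variable use cases listed above.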
Diffstat (limited to 'arch/arm64/kvm/config.c')
-rw-r--r--  arch/arm64/kvm/config.c  90
1 file changed, 90 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kvm/config.c b/arch/arm64/kvm/config.c
index fbd8944a3dea..24bb3f36e9d5 100644
--- a/arch/arm64/kvm/config.c
+++ b/arch/arm64/kvm/config.c
@@ -5,6 +5,8 @@
*/
#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_nested.h>
#include <asm/sysreg.h>
/*
@@ -1428,3 +1430,91 @@ void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *r
break;
}
}
+
+static __always_inline struct fgt_masks *__fgt_reg_to_masks(enum vcpu_sysreg reg)
+{
+        switch (reg) {
+        case HFGRTR_EL2:
+                return &hfgrtr_masks;
+        case HFGWTR_EL2:
+                return &hfgwtr_masks;
+        case HFGITR_EL2:
+                return &hfgitr_masks;
+        case HDFGRTR_EL2:
+                return &hdfgrtr_masks;
+        case HDFGWTR_EL2:
+                return &hdfgwtr_masks;
+        case HAFGRTR_EL2:
+                return &hafgrtr_masks;
+        case HFGRTR2_EL2:
+                return &hfgrtr2_masks;
+        case HFGWTR2_EL2:
+                return &hfgwtr2_masks;
+        case HFGITR2_EL2:
+                return &hfgitr2_masks;
+        case HDFGRTR2_EL2:
+                return &hdfgrtr2_masks;
+        case HDFGWTR2_EL2:
+                return &hdfgwtr2_masks;
+        default:
+                BUILD_BUG_ON(1);
+        }
+}
+
+static __always_inline void __compute_fgt(struct kvm_vcpu *vcpu, enum vcpu_sysreg reg)
+{
+        u64 fgu = vcpu->kvm->arch.fgu[__fgt_reg_to_group_id(reg)];
+        struct fgt_masks *m = __fgt_reg_to_masks(reg);
+        u64 clear = 0, set = 0, val = m->nmask;
+
+        set |= fgu & m->mask;
+        clear |= fgu & m->nmask;
+
+        if (is_nested_ctxt(vcpu)) {
+                u64 nested = __vcpu_sys_reg(vcpu, reg);
+                set |= nested & m->mask;
+                clear |= ~nested & m->nmask;
+        }
+
+        val |= set;
+        val &= ~clear;
+        *vcpu_fgt(vcpu, reg) = val;
+}
+
+static void __compute_hfgwtr(struct kvm_vcpu *vcpu)
+{
+        __compute_fgt(vcpu, HFGWTR_EL2);
+
+        if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
+                *vcpu_fgt(vcpu, HFGWTR_EL2) |= HFGWTR_EL2_TCR_EL1;
+}
+
+static void __compute_hdfgwtr(struct kvm_vcpu *vcpu)
+{
+        __compute_fgt(vcpu, HDFGWTR_EL2);
+
+        if (is_hyp_ctxt(vcpu))
+                *vcpu_fgt(vcpu, HDFGWTR_EL2) |= HDFGWTR_EL2_MDSCR_EL1;
+}
+
+void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu)
+{
+        if (!cpus_have_final_cap(ARM64_HAS_FGT))
+                return;
+
+        __compute_fgt(vcpu, HFGRTR_EL2);
+        __compute_hfgwtr(vcpu);
+        __compute_fgt(vcpu, HFGITR_EL2);
+        __compute_fgt(vcpu, HDFGRTR_EL2);
+        __compute_hdfgwtr(vcpu);
+        __compute_fgt(vcpu, HAFGRTR_EL2);
+
+        if (!cpus_have_final_cap(ARM64_HAS_FGT2))
+                return;
+
+        __compute_fgt(vcpu, HFGRTR2_EL2);
+        __compute_fgt(vcpu, HFGWTR2_EL2);
+        __compute_fgt(vcpu, HFGITR2_EL2);
+        __compute_fgt(vcpu, HDFGRTR2_EL2);
+        __compute_fgt(vcpu, HDFGWTR2_EL2);
+}
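The core of the added code is the polarity-aware merge in __compute_fgt(): fine-grained trap bits covered by m->mask trap when set to 1, while bits covered by m->nmask trap when cleared to 0, so the value starts at m->nmask ("nothing trapped") and is then adjusted from the host's fine-grained-undefined (FGU) bits and, for a nested guest, the guest hypervisor's own FGT register. The standalone toy below reproduces that arithmetic with invented mask values; it illustrates the computation only and is not kernel code.

/* Toy model of __compute_fgt()'s mask arithmetic; all values invented. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t mask   = 0x0f; /* positive polarity: bits 0-3 trap when 1 */
        uint64_t nmask  = 0xf0; /* negative polarity: bits 4-7 trap when 0 */

        uint64_t fgu    = 0x21; /* bits the host must trap (unsupported features) */
        uint64_t nested = 0x18; /* guest hypervisor's FGT value (is_nested_ctxt() case) */

        uint64_t set = 0, clear = 0, val = nmask;      /* start: no traps */

        set   |= fgu & mask;            /* host traps, positive bits */
        clear |= fgu & nmask;           /* host traps, negative bits */
        set   |= nested & mask;         /* guest traps, positive bits */
        clear |= ~nested & nmask;       /* guest trap = bit clear, negative bits */

        val |= set;
        val &= ~clear;
        printf("effective FGT value: %#llx\n", (unsigned long long)val);
        return 0;
}

With these inputs the result is 0x19: positive bits 0 and 3 end up set (trapped) and negative bits 5-7 end up clear (trapped), while negative bit 4 stays 1 (untrapped), matching the set/clear precedence in the kernel function.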