arm64: KVM: Report SMCCC_ARCH_WORKAROUND_1 BP hardening support
author		Marc Zyngier <marc.zyngier@arm.com>
		Tue, 6 Feb 2018 17:56:14 +0000 (17:56 +0000)
committer	Catalin Marinas <catalin.marinas@arm.com>
		Tue, 6 Feb 2018 22:54:05 +0000 (22:54 +0000)
A new feature of SMCCC 1.1 is that it offers firmware-based CPU
workarounds. In particular, SMCCC_ARCH_WORKAROUND_1 provides
BP hardening for CVE-2017-5715.

If the host has some mitigation for this issue, report that
we deal with it using SMCCC_ARCH_WORKAROUND_1, as we apply the
host workaround on every guest exit.
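
For context, a guest discovers this through the standard SMCCC 1.1 probing
sequence: check that the firmware interface is at least SMCCC 1.1, then call
SMCCC_ARCH_FEATURES with ARM_SMCCC_ARCH_WORKAROUND_1 as the argument; a
non-negative return means the workaround call is implemented. A minimal,
illustrative sketch of such a probe, assuming the arm_smccc_1_1_hvc() helper
from the same SMCCC 1.1 series (guest_has_bp_hardening() is a hypothetical
name, not part of this patch):

    #include <linux/arm-smccc.h>

    /* Illustrative guest-side probe; not part of this patch. */
    static bool guest_has_bp_hardening(void)
    {
            struct arm_smccc_res res;

            /* SMCCC 1.1: a negative value in a0 means "not implemented". */
            arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                              ARM_SMCCC_ARCH_WORKAROUND_1, &res);
            return (int)res.a0 >= 0;
    }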

Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_host.h
include/linux/arm-smccc.h
virt/kvm/arm/psci.c

index acbf9ec7b396e329b597a1710874e40b0e2e9bf6..ef54013b5b9f1ee3f9656e41d0451cbc160cd56e 100644 (file)
@@ -306,4 +306,11 @@ static inline void kvm_fpsimd_flush_cpu_state(void) {}
 
 static inline void kvm_arm_vhe_guest_enter(void) {}
 static inline void kvm_arm_vhe_guest_exit(void) {}
+
+static inline bool kvm_arm_harden_branch_predictor(void)
+{
+       /* No way to detect it yet, pretend it is not there. */
+       return false;
+}
+
 #endif /* __ARM_KVM_HOST_H__ */
index 4485ae8e98ded78193596582f459ea73fc2c095a..a73f63aca68e9e56e452508ef31dcac0fe3c1850 100644 (file)
@@ -415,4 +415,10 @@ static inline void kvm_arm_vhe_guest_exit(void)
 {
        local_daif_restore(DAIF_PROCCTX_NOIRQ);
 }
+
+static inline bool kvm_arm_harden_branch_predictor(void)
+{
+       return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
+}
+
 #endif /* __ARM64_KVM_HOST_H__ */
index dc68aa5a7261c206468de7e47dd1131e748c5f8b..e1ef944ef1da9b5de7b48b2cabf2bd962719af23 100644 (file)
                           ARM_SMCCC_SMC_32,                            \
                           0, 1)
 
+#define ARM_SMCCC_ARCH_WORKAROUND_1                                    \
+       ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
+                          ARM_SMCCC_SMC_32,                            \
+                          0, 0x8000)
+
 #ifndef __ASSEMBLY__
 
 #include <linux/linkage.h>
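
For reference, the new ID expands to 0x80008000: bit 31 set for a fast
(atomic) call, bit 30 clear for the SMC32 calling convention, owner 0
(Arm Architecture Calls) in bits 29:24, and function number 0x8000 in
bits 15:0. A hand-expanded sketch of the same value (BP_HARDENING_FUNC_ID
is an illustrative name, not part of this patch):

    /* Mirrors ARM_SMCCC_CALL_VAL(FAST_CALL, SMC_32, 0, 0x8000). */
    #define BP_HARDENING_FUNC_ID                                        \
            ((1U << 31) |   /* fast call */                             \
             (0U << 30) |   /* 32-bit calling convention */             \
             (0U << 24) |   /* owner 0: Arm Architecture Calls */       \
             0x8000U)       /* function number */
    /* BP_HARDENING_FUNC_ID == 0x80008000 == ARM_SMCCC_ARCH_WORKAROUND_1 */
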
index e105c1153794b864eadd150a1f6176e5f3193038..6919352cbf15e8d50b3742e32acfd152be4b18a8 100644 (file)
@@ -405,13 +405,20 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 {
        u32 func_id = smccc_get_function(vcpu);
        u32 val = PSCI_RET_NOT_SUPPORTED;
+       u32 feature;
 
        switch (func_id) {
        case ARM_SMCCC_VERSION_FUNC_ID:
                val = ARM_SMCCC_VERSION_1_1;
                break;
        case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
-               /* Nothing supported yet */
+               feature = smccc_get_arg1(vcpu);
+               switch (feature) {
+               case ARM_SMCCC_ARCH_WORKAROUND_1:
+                       if (kvm_arm_harden_branch_predictor())
+                               val = 0;
+                       break;
+               }
                break;
        default:
                return kvm_psci_call(vcpu);
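
With this handler in place, the feature query returns 0 (success) when the
host has BP hardening and PSCI_RET_NOT_SUPPORTED otherwise. A guest that has
probed successfully can then issue ARM_SMCCC_ARCH_WORKAROUND_1 calls; under
KVM the HVC itself traps to the host and, as the commit message notes, the
host applies its own BP hardening on every guest exit, so no extra handling
is needed for the workaround to take effect. An illustrative guest-side
invocation, again assuming the arm_smccc_1_1_hvc() helper from the same
series (guest_invoke_bp_hardening() is a hypothetical name):

    /* Illustrative only: under KVM the trap to the host is what matters. */
    static void guest_invoke_bp_hardening(void)
    {
            arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
    }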