arm64: KVM: Enable VHE support for :G/:H perf event modifiers
author    Andrew Murray <andrew.murray@arm.com>
          Tue, 9 Apr 2019 19:22:15 +0000 (20:22 +0100)
committer Marc Zyngier <marc.zyngier@arm.com>
          Wed, 24 Apr 2019 14:46:26 +0000 (15:46 +0100)
With VHE, the host and guest kernels run at different exception levels
(EL2 and EL1 respectively) and share a single exception level for
userspace (EL0). We can take advantage of this and use the PMU's
exception level filtering to avoid enabling/disabling counters in the
world-switch code. Instead we just modify the counter type to include
or exclude EL0 at vcpu_{load,put} time.
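
For reference, these are the per-counter PMEVTYPER<n>_EL0 filter bits the
scheme toggles; a sketch, with values assumed to match
arch/arm64/include/asm/perf_event.h:

    #define ARMV8_PMU_EXCLUDE_EL1   (1U << 31)  /* P: do not count at EL1 */
    #define ARMV8_PMU_EXCLUDE_EL0   (1U << 30)  /* U: do not count at EL0 */
    #define ARMV8_PMU_INCLUDE_EL2   (1U << 27)  /* NSH: also count at EL2 */

Since the host (EL2) and guest (EL1) kernels already sit at different
levels, only the shared EL0 has to be retargeted, which is what makes the
world-switch-time enable/disable unnecessary.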

We also ensure that trapped PMU system register writes do not re-enable
EL0 when reconfiguring the backing perf events.
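
Condensed, the guard added in the sys_regs.c hunks below looks roughly
like this; access_pmu_example() and update_shadow_reg_and_perf_event()
are hypothetical stand-ins for the real handlers (access_pmcr,
access_pmu_evtyper, access_pmcnten):

    /* Illustrative sketch only: a trapped write recreates the backing
     * perf event, so the vcpu_load-time EL0 filtering must be redone. */
    static bool access_pmu_example(struct kvm_vcpu *vcpu, u64 val)
    {
            update_shadow_reg_and_perf_event(vcpu, val);    /* hypothetical */
            kvm_vcpu_pmu_restore_guest(vcpu);       /* re-apply EL0 filter */
            return true;
    }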

This approach completely avoids the small counter blackout windows around
guest entry/exit that are seen with !VHE.
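
Userspace exercises these filters through perf's :G (guest) and :H (host)
event modifiers, e.g. "perf stat -e cycles:G", which map onto the
exclude_host/exclude_guest bits of struct perf_event_attr. A minimal
sketch using the real UAPI fields (the helper name is made up):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Count guest-only cycles, the programmatic equivalent of "cycles:G". */
    static int open_guest_cycles(pid_t pid)
    {
            struct perf_event_attr attr = {
                    .type         = PERF_TYPE_HARDWARE,
                    .size         = sizeof(attr),
                    .config       = PERF_COUNT_HW_CPU_CYCLES,
                    .exclude_host = 1,      /* :G - drop host-side counts */
            };

            /* one event for this pid, any CPU, no group, no flags */
            return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
    }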

Suggested-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Andrew Murray <andrew.murray@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
arch/arm/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kernel/perf_event.c
arch/arm64/kvm/pmu.c
arch/arm64/kvm/sys_regs.c
virt/kvm/arm/arm.c

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 2d721ab05925755b88b88772172db0ee9315bbeb..075e1921fdd909096715f6c23c2ab0eb185f0b1a 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -368,6 +368,9 @@ static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
 
+static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
+
 static inline void kvm_arm_vhe_guest_enter(void) {}
 static inline void kvm_arm_vhe_guest_exit(void) {}
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 645d74c705d649bf6dc1373637d27130a917469e..2a8d3f8ca22c38c0a747115b07b97131536611c4 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -580,7 +580,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
 
 static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
 {
-       return attr->exclude_host;
+       return (!has_vhe() && attr->exclude_host);
 }
 
 #ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
@@ -594,6 +594,9 @@ void kvm_clr_pmu_events(u32 clr);
 
 void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt);
 bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt);
+
+void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
+void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
 #else
 static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
 static inline void kvm_clr_pmu_events(u32 clr) {}
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 6bb28aaf5aea379f42ac42ec88342713e5bba190..314b1adedf065fb86d0ece848ee7c57d30efe09b 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -847,8 +847,12 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
         * with other architectures (x86 and Power).
         */
        if (is_kernel_in_hyp_mode()) {
-               if (!attr->exclude_kernel)
+               if (!attr->exclude_kernel && !attr->exclude_host)
                        config_base |= ARMV8_PMU_INCLUDE_EL2;
+               if (attr->exclude_guest)
+                       config_base |= ARMV8_PMU_EXCLUDE_EL1;
+               if (attr->exclude_host)
+                       config_base |= ARMV8_PMU_EXCLUDE_EL0;
        } else {
                if (!attr->exclude_hv && !attr->exclude_host)
                        config_base |= ARMV8_PMU_INCLUDE_EL2;
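
Reading the VHE branch above, the filter encodings work out as follows (a
sketch using the ARMV8_PMU_* names from the hunk):

    /*
     * no modifier:        INCLUDE_EL2                -> EL2 + EL1 + EL0
     * :H (exclude_guest): INCLUDE_EL2 | EXCLUDE_EL1  -> EL2 + EL0
     * :G (exclude_host):  EXCLUDE_EL0                -> EL1 at creation; EL0
     *                     is re-included at vcpu_load and excluded again at
     *                     vcpu_put (see arch/arm64/kvm/pmu.c below)
     */
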
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index 599e6d3f692ee703a7090bb93baa5e91a613074f..3f99a095a1ffc013bb761c451baba8252d6326e2 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -8,11 +8,19 @@
 #include <asm/kvm_hyp.h>
 
 /*
- * Given the exclude_{host,guest} attributes, determine if we are going
- * to need to switch counters at guest entry/exit.
+ * Given the perf event attributes and system type, determine
+ * if we are going to need to switch counters at guest entry/exit.
  */
 static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
 {
+       /*
+        * With VHE the guest kernel runs at EL1 and the host at EL2;
+        * if userspace (EL0) is excluded then we have no reason to
+        * switch counters.
+        */
+       if (has_vhe() && attr->exclude_user)
+               return false;
+
        /* Only switch if attributes are different */
        return (attr->exclude_host != attr->exclude_guest);
 }
@@ -82,3 +90,79 @@ void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
        if (pmu->events_host)
                write_sysreg(pmu->events_host, pmcntenset_el0);
 }
+
+/*
+ * Modify ARMv8 PMU events to include EL0 counting
+ */
+static void kvm_vcpu_pmu_enable_el0(unsigned long events)
+{
+       u64 typer;
+       u32 counter;
+
+       for_each_set_bit(counter, &events, 32) {
+               write_sysreg(counter, pmselr_el0);
+               isb();
+               typer = read_sysreg(pmxevtyper_el0) & ~ARMV8_PMU_EXCLUDE_EL0;
+               write_sysreg(typer, pmxevtyper_el0);
+               isb();
+       }
+}
+
+/*
+ * Modify ARMv8 PMU events to exclude EL0 counting
+ */
+static void kvm_vcpu_pmu_disable_el0(unsigned long events)
+{
+       u64 typer;
+       u32 counter;
+
+       for_each_set_bit(counter, &events, 32) {
+               write_sysreg(counter, pmselr_el0);
+               isb();
+               typer = read_sysreg(pmxevtyper_el0) | ARMV8_PMU_EXCLUDE_EL0;
+               write_sysreg(typer, pmxevtyper_el0);
+               isb();
+       }
+}
+
+/*
+ * On VHE ensure that only guest events have EL0 counting enabled
+ */
+void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpu_context *host_ctxt;
+       struct kvm_host_data *host;
+       u32 events_guest, events_host;
+
+       if (!has_vhe())
+               return;
+
+       host_ctxt = vcpu->arch.host_cpu_context;
+       host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+       events_guest = host->pmu_events.events_guest;
+       events_host = host->pmu_events.events_host;
+
+       kvm_vcpu_pmu_enable_el0(events_guest);
+       kvm_vcpu_pmu_disable_el0(events_host);
+}
+
+/*
+ * On VHE ensure that only host events have EL0 counting enabled
+ */
+void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpu_context *host_ctxt;
+       struct kvm_host_data *host;
+       u32 events_guest, events_host;
+
+       if (!has_vhe())
+               return;
+
+       host_ctxt = vcpu->arch.host_cpu_context;
+       host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+       events_guest = host->pmu_events.events_guest;
+       events_host = host->pmu_events.events_host;
+
+       kvm_vcpu_pmu_enable_el0(events_host);
+       kvm_vcpu_pmu_disable_el0(events_guest);
+}
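
One subtlety in the loops above: PMXEVTYPER_EL0 is banked by PMSELR_EL0,
so each iteration selects a counter and issues an isb() before touching
the type register, with a second isb() ordering that update against the
next selection.
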
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 12bd72e42b91e85408d3891400f715d58ed8dce7..9d02643bc60161dfb6fcbfb83f199bdbecdf7ed2 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -695,6 +695,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                val |= p->regval & ARMV8_PMU_PMCR_MASK;
                __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
                kvm_pmu_handle_pmcr(vcpu, val);
+               kvm_vcpu_pmu_restore_guest(vcpu);
        } else {
                /* PMCR.P & PMCR.C are RAZ */
                val = __vcpu_sys_reg(vcpu, PMCR_EL0)
@@ -850,6 +851,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
        if (p->is_write) {
                kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
                __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
+               kvm_vcpu_pmu_restore_guest(vcpu);
        } else {
                p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
        }
@@ -875,6 +877,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                        /* accessing PMCNTENSET_EL0 */
                        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
                        kvm_pmu_enable_counter(vcpu, val);
+                       kvm_vcpu_pmu_restore_guest(vcpu);
                } else {
                        /* accessing PMCNTENCLR_EL0 */
                        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index e960b91551d6067b712ebb92776804bc03df7b95..8b7ca101f0f7791b2082f62fcfae361559509414 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -382,6 +382,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        kvm_timer_vcpu_load(vcpu);
        kvm_vcpu_load_sysregs(vcpu);
        kvm_arch_vcpu_load_fp(vcpu);
+       kvm_vcpu_pmu_restore_guest(vcpu);
 
        if (single_task_running())
                vcpu_clear_wfe_traps(vcpu);
@@ -397,6 +398,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        kvm_vcpu_put_sysregs(vcpu);
        kvm_timer_vcpu_put(vcpu);
        kvm_vgic_put(vcpu);
+       kvm_vcpu_pmu_restore_host(vcpu);
 
        vcpu->cpu = -1;