From a336282d7753a92ced7b8e52ff959929f6e550ff Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov
Date: Mon, 8 Oct 2018 21:28:11 +0200
Subject: [PATCH] x86/kvm/nVMX: introduce source data cache for
 kvm_init_shadow_ept_mmu()

MMU re-initialization is expensive; in particular,
update_permission_bitmask() and update_pkru_bitmask() are.

Cache the data used to setup shadow EPT MMU and avoid full re-init when
it is unchanged.

Signed-off-by: Vitaly Kuznetsov
Reviewed-by: Sean Christopherson
Signed-off-by: Paolo Bonzini
---
 arch/x86/include/asm/kvm_host.h | 14 +++++++++
 arch/x86/kvm/mmu.c              | 53 +++++++++++++++++++++++----------
 2 files changed, 52 insertions(+), 15 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 51ce635494b0..bf541af7442d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -283,7 +283,21 @@ union kvm_mmu_page_role {
 };
 
 union kvm_mmu_extended_role {
+/*
+ * This structure complements kvm_mmu_page_role caching everything needed for
+ * MMU configuration. If nothing in both these structures changed, MMU
+ * re-configuration can be skipped. @valid bit is set on first usage so we don't
+ * treat all-zero structure as valid data.
+ */
 	u32 word;
+	struct {
+		unsigned int valid:1;
+		unsigned int execonly:1;
+		unsigned int cr4_pse:1;
+		unsigned int cr4_pke:1;
+		unsigned int cr4_smap:1;
+		unsigned int cr4_smep:1;
+	};
 };
 
 union kvm_mmu_role {
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3301973527aa..9d400a06c5f2 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4724,6 +4724,20 @@ static void paging32E_init_context(struct kvm_vcpu *vcpu,
 	paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
 }
 
+static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
+{
+	union kvm_mmu_extended_role ext = {0};
+
+	ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+	ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
+	ext.cr4_pse = !!is_pse(vcpu);
+	ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
+
+	ext.valid = 1;
+
+	return ext;
+}
+
 static union kvm_mmu_page_role
 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu)
 {
@@ -4830,19 +4844,23 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 
-static union kvm_mmu_page_role
-kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty)
+static union kvm_mmu_role
+kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
+				   bool execonly)
 {
-	union kvm_mmu_page_role role;
+	union kvm_mmu_role role;
 
-	/* Role is inherited from root_mmu */
-	role.word = vcpu->arch.root_mmu.base_role.word;
+	/* Base role is inherited from root_mmu */
+	role.base.word = vcpu->arch.root_mmu.mmu_role.base.word;
+	role.ext = kvm_calc_mmu_role_ext(vcpu);
 
-	role.level = PT64_ROOT_4LEVEL;
-	role.direct = false;
-	role.ad_disabled = !accessed_dirty;
-	role.guest_mode = true;
-	role.access = ACC_ALL;
+	role.base.level = PT64_ROOT_4LEVEL;
+	role.base.direct = false;
+	role.base.ad_disabled = !accessed_dirty;
+	role.base.guest_mode = true;
+	role.base.access = ACC_ALL;
+
+	role.ext.execonly = execonly;
 
 	return role;
 }
@@ -4851,10 +4869,16 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 			     bool accessed_dirty, gpa_t new_eptp)
 {
 	struct kvm_mmu *context = vcpu->arch.mmu;
-	union kvm_mmu_page_role root_page_role =
-		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty);
+	union kvm_mmu_role new_role =
+		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
+						   execonly);
+
+	__kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false);
+
+	new_role.base.word &= mmu_base_role_mask.word;
+	if (new_role.as_u64 == context->mmu_role.as_u64)
+		return;
 
-	__kvm_mmu_new_cr3(vcpu, new_eptp, root_page_role, false);
 	context->shadow_root_level = PT64_ROOT_4LEVEL;
 
 	context->nx = true;
@@ -4866,8 +4890,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 	context->update_pte = ept_update_pte;
 	context->root_level = PT64_ROOT_4LEVEL;
 	context->direct_map = false;
-	context->mmu_role.base.word =
-		root_page_role.word & mmu_base_role_mask.word;
+	context->mmu_role.as_u64 = new_role.as_u64;
 
 	update_permission_bitmask(vcpu, context, true);
 	update_pkru_bitmask(vcpu, context, true);
-- 
2.30.2
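
Editor's note: for readers outside the kernel tree, the following is a minimal,
self-contained sketch of the caching pattern this patch introduces: pack every
input that influences MMU setup into one 64-bit union, then compare it against
the cached copy to decide whether the expensive re-initialization can be
skipped. The union layout mirrors the patch, but the vcpu struct, calc_role()
and init_shadow_mmu() here are hypothetical stand-ins, not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

union mmu_page_role {                   /* analogous to kvm_mmu_page_role */
	uint32_t word;
	struct {
		unsigned int level:4;
		unsigned int direct:1;
		unsigned int ad_disabled:1;
		unsigned int guest_mode:1;
	};
};

union mmu_extended_role {               /* analogous to kvm_mmu_extended_role */
	uint32_t word;
	struct {
		unsigned int valid:1;   /* keeps all-zero from matching */
		unsigned int execonly:1;
		unsigned int cr4_smep:1;
		unsigned int cr4_smap:1;
	};
};

union mmu_role {                        /* analogous to kvm_mmu_role */
	uint64_t as_u64;
	struct {
		union mmu_page_role base;
		union mmu_extended_role ext;
	};
};

struct vcpu {                           /* hypothetical stand-in for kvm_vcpu */
	union mmu_role cached_role;
	bool cr4_smep, cr4_smap;
};

static union mmu_role calc_role(struct vcpu *v, bool accessed_dirty,
				bool execonly)
{
	union mmu_role role = { .as_u64 = 0 };

	role.base.level = 4;
	role.base.ad_disabled = !accessed_dirty;
	role.base.guest_mode = 1;
	role.ext.cr4_smep = v->cr4_smep;
	role.ext.cr4_smap = v->cr4_smap;
	role.ext.execonly = execonly;
	role.ext.valid = 1;             /* set on first use, as in the patch */
	return role;
}

/* Returns true when the expensive re-init was actually performed. */
static bool init_shadow_mmu(struct vcpu *v, bool accessed_dirty, bool execonly)
{
	union mmu_role new_role = calc_role(v, accessed_dirty, execonly);

	if (new_role.as_u64 == v->cached_role.as_u64)
		return false;           /* nothing changed: skip re-init */

	/* ... expensive reconfiguration would run here ... */
	v->cached_role.as_u64 = new_role.as_u64;
	return true;
}

int main(void)
{
	struct vcpu v = { .cr4_smep = true, .cr4_smap = false };

	printf("first call re-inits: %d\n", init_shadow_mmu(&v, true, false));
	printf("same inputs re-init: %d\n", init_shadow_mmu(&v, true, false));
	v.cr4_smap = true;      /* a guest CR4 change invalidates the cache */
	printf("after CR4 change:    %d\n", init_shadow_mmu(&v, true, false));
	return 0;
}

Packing the base and extended roles into a single 64-bit union makes the "did
anything change" test one integer compare, and the valid bit guarantees that a
freshly zeroed context can never alias a legitimate all-zero configuration.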