kvm: x86: Add a root_hpa parameter to kvm_mmu->invlpg()
author Junaid Shahid <junaids@google.com>
Wed, 27 Jun 2018 21:59:16 +0000 (14:59 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Mon, 6 Aug 2018 15:58:58 +0000 (17:58 +0200)
This allows invlpg() to be called using either the active root_hpa
or the prev_root_hpa.

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h
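For context, a minimal sketch of the kind of caller the new parameter enables. This is illustrative only: the prev_root field (with .hpa/.cr3 members) and the kvm_get_pcid() helper are assumptions about other patches in this series, not something introduced by this commit.

	/*
	 * Illustrative sketch only: invalidate a guest virtual address in a
	 * cached previous root instead of the active one. prev_root and
	 * kvm_get_pcid() are assumed from elsewhere in the series and may
	 * differ from the final code.
	 */
	static void example_invlpg_prev_root(struct kvm_vcpu *vcpu, gva_t gva,
					     unsigned long pcid)
	{
		struct kvm_mmu *mmu = &vcpu->arch.mmu;

		/* Only flush if the cached root is valid and matches the PCID. */
		if (VALID_PAGE(mmu->prev_root.hpa) &&
		    pcid == kvm_get_pcid(vcpu, mmu->prev_root.cr3))
			mmu->invlpg(vcpu, gva, mmu->prev_root.hpa);
	}

Within this commit itself, both existing callers (kvm_mmu_invlpg() and kvm_mmu_invpcid_gva()) keep passing mmu->root_hpa, so behavior is unchanged.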

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8b4aa5e7ff924723ee4ea23508f5428164d1db69..0b77c233e441482aafdab7ae312153b22b90cf78 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -354,7 +354,7 @@ struct kvm_mmu {
                               struct x86_exception *exception);
        int (*sync_page)(struct kvm_vcpu *vcpu,
                         struct kvm_mmu_page *sp);
-       void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
+       void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
        void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                           u64 *spte, const void *pte);
        hpa_t root_hpa;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2b7cfc8e41bba49d81ef5b3906f594300f3253d8..6eeca915511e99d8b18fbf183fbb4cf9617cfd71 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -189,7 +189,13 @@ static const union kvm_mmu_page_role mmu_base_role_mask = {
        .ad_disabled = 1,
 };
 
-#define for_each_shadow_entry(_vcpu, _addr, _walker)    \
+#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
+       for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
+                                        (_root), (_addr));                \
+            shadow_walk_okay(&(_walker));                                 \
+            shadow_walk_next(&(_walker)))
+
+#define for_each_shadow_entry(_vcpu, _addr, _walker)            \
        for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
             shadow_walk_okay(&(_walker));                      \
             shadow_walk_next(&(_walker)))
@@ -1999,7 +2005,7 @@ static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
        return 0;
 }
 
-static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
+static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root)
 {
 }
 
@@ -2405,11 +2411,12 @@ out:
        return sp;
 }
 
-static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
-                            struct kvm_vcpu *vcpu, u64 addr)
+static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
+                                       struct kvm_vcpu *vcpu, hpa_t root,
+                                       u64 addr)
 {
        iterator->addr = addr;
-       iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
+       iterator->shadow_addr = root;
        iterator->level = vcpu->arch.mmu.shadow_root_level;
 
        if (iterator->level == PT64_ROOT_4LEVEL &&
@@ -2418,6 +2425,12 @@ static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
                --iterator->level;
 
        if (iterator->level == PT32E_ROOT_LEVEL) {
+               /*
+                * prev_root is currently only used for 64-bit hosts. So only
+                * the active root_hpa is valid here.
+                */
+               BUG_ON(root != vcpu->arch.mmu.root_hpa);
+
                iterator->shadow_addr
                        = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
                iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
@@ -2427,6 +2440,13 @@ static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
        }
 }
 
+static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
+                            struct kvm_vcpu *vcpu, u64 addr)
+{
+       shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu.root_hpa,
+                                   addr);
+}
+
 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
 {
        if (iterator->level < PT_PAGE_TABLE_LEVEL)
@@ -5186,7 +5206,9 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
-       vcpu->arch.mmu.invlpg(vcpu, gva);
+       struct kvm_mmu *mmu = &vcpu->arch.mmu;
+
+       mmu->invlpg(vcpu, gva, mmu->root_hpa);
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        ++vcpu->stat.invlpg;
 }
@@ -5197,7 +5219,7 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
        struct kvm_mmu *mmu = &vcpu->arch.mmu;
 
        if (pcid == kvm_get_active_pcid(vcpu)) {
-               mmu->invlpg(vcpu, gva);
+               mmu->invlpg(vcpu, gva, mmu->root_hpa);
                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        }
 
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 208f7646ce0d9ac83dbbb5053e90ebc086d14bf2..14ffd973df54e6d1c122b14d6f15f33cea931ed6 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -856,7 +856,7 @@ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
        return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
 }
 
-static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
+static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
 {
        struct kvm_shadow_walk_iterator iterator;
        struct kvm_mmu_page *sp;
@@ -871,13 +871,13 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
         */
        mmu_topup_memory_caches(vcpu);
 
-       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+       if (!VALID_PAGE(root_hpa)) {
                WARN_ON(1);
                return;
        }
 
        spin_lock(&vcpu->kvm->mmu_lock);
-       for_each_shadow_entry(vcpu, gva, iterator) {
+       for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) {
                level = iterator.level;
                sptep = iterator.sptep;