KVM: x86: remove check on nr_mmu_pages in kvm_arch_commit_memory_region()
authorWei Yang <richard.weiyang@gmail.com>
Thu, 27 Sep 2018 00:31:26 +0000 (08:31 +0800)
committerPaolo Bonzini <pbonzini@redhat.com>
Thu, 28 Mar 2019 16:27:19 +0000 (17:27 +0100)
* nr_mmu_pages would be non-zero only if kvm->arch.n_requested_mmu_pages is
  zero, since the calculation is guarded by the
  !kvm->arch.n_requested_mmu_pages check.

* nr_mmu_pages is always non-zero once assigned, since
  kvm_mmu_calculate_mmu_pages() never returns zero.

For these two reasons, we can merge the two *if* clauses and use the
return value from kvm_mmu_calculate_mmu_pages() directly. This simplifies
the code and also eliminates the possibility of a reader believing that
nr_mmu_pages could be zero.
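
For reference, a sketch of the helper's sizing logic as recalled from the
kernel tree, supporting the second bullet above; the memslot walk is
elided, and the constant names (KVM_PERMILLE_MMU_PAGES,
KVM_MIN_ALLOC_MMU_PAGES) are quoted from memory rather than from this
diff:

	unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
	{
		unsigned int nr_mmu_pages;
		unsigned int nr_pages = 0;

		/* ... sum memslot->npages over all memslots into nr_pages ... */

		/* Budget a per-mille fraction of guest pages for the MMU. */
		nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
		/* The clamp to a non-zero floor is why this never returns zero. */
		return max_t(unsigned int, nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
	}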

Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu.c
arch/x86/kvm/x86.c

index 5b03006c00beab7ea34a04a9e245e17d6db78896..679168931c400ffb387b8dd66360fb8d2d32ffa3 100644 (file)
@@ -1254,7 +1254,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                   gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
+unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
index f6d760dcdb75e3b70ea9e3fed5ecd78ce75a9262..5a9981465fbb5351f8596f6af6927fa53cb4ebdc 100644 (file)
@@ -6028,7 +6028,7 @@ out:
 /*
  * Calculate mmu pages needed for kvm.
  */
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
+unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
 {
        unsigned int nr_mmu_pages;
        unsigned int  nr_pages = 0;
index 65e4559eef2fc8589e0a4277077e766ceead3994..491e92383da818efaa08d8eb179d79dbbeece918 100644 (file)
@@ -9429,13 +9429,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
                                const struct kvm_memory_slot *new,
                                enum kvm_mr_change change)
 {
-       int nr_mmu_pages = 0;
-
        if (!kvm->arch.n_requested_mmu_pages)
-               nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
-
-       if (nr_mmu_pages)
-               kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
+               kvm_mmu_change_mmu_pages(kvm,
+                               kvm_mmu_calculate_default_mmu_pages(kvm));
 
        /*
         * Dirty logging tracks sptes in 4k granularity, meaning that large