From bdd303cb1bdb24e71eef8e4510b27166bfadf286 Mon Sep 17 00:00:00 2001
From: Wei Yang
Date: Mon, 5 Nov 2018 14:45:03 +0800
Subject: [PATCH] KVM: fix some typos
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Signed-off-by: Wei Yang
[Preserved the iff and a probably intentional weird bracket notation. Also
 dropped the style change to make a single-purpose patch. - Radim]
Signed-off-by: Radim Krčmář
---
 arch/x86/kvm/mmu.c  | 2 +-
 arch/x86/kvm/x86.c  | 2 +-
 virt/kvm/async_pf.c | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7c03c0f35444..1fba9f356218 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5638,7 +5638,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 	 * spte from present to present (changing the spte from present
 	 * to nonpresent will flush all the TLBs immediately), in other
 	 * words, the only case we care is mmu_spte_update() where we
-	 * haved checked SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE
+	 * have checked SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE
 	 * instead of PT_WRITABLE_MASK, that means it does not depend
 	 * on PT_WRITABLE_MASK anymore.
 	 */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 70faa3cdc4dc..99e9b6964fce 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9344,7 +9344,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
 	 * with dirty logging disabled in order to eliminate unnecessary GPA
 	 * logging in PML buffer (and potential PML buffer full VMEXT). This
 	 * guarantees leaving PML enabled during guest's lifetime won't have
-	 * any additonal overhead from PML when guest is running with dirty
+	 * any additional overhead from PML when guest is running with dirty
 	 * logging disabled for memory slots.
 	 *
 	 * kvm_x86_ops->slot_enable_log_dirty is called when switching new slot
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 23c2519c5b32..110cbe3f74f8 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -82,7 +82,7 @@ static void async_pf_execute(struct work_struct *work)
 	might_sleep();
 
 	/*
-	 * This work is run asynchromously to the task which owns
+	 * This work is run asynchronously to the task which owns
 	 * mm and might be done in another context, so we must
 	 * access remotely.
 	 */
-- 
2.30.2