KVM: PPC: Book3S HV: Avoid lockdep debugging in TCE realmode handlers
author: Alexey Kardashevskiy <aik@ozlabs.ru>
Fri, 29 Mar 2019 05:42:20 +0000 (16:42 +1100)
committer: Paul Mackerras <paulus@ozlabs.org>
Tue, 30 Apr 2019 04:43:13 +0000 (14:43 +1000)
The kvmppc_tce_to_ua() helper is called from real and virtual modes
and it works fine as long as CONFIG_DEBUG_LOCKDEP is not enabled.
However, if lockdep debugging is on, lockdep will most likely break
in kvm_memslots() because of srcu_dereference_check(), so we need to use
the PPC-specific kvm_memslots_raw(), which uses the realmode-safe
rcu_dereference_raw_notrace().

This creates a realmode copy of kvmppc_tce_to_ua() which replaces
kvm_memslots() with kvm_memslots_raw().

Since kvmppc_rm_tce_to_ua() becomes static and can only be used inside
HV KVM, this moves it earlier under CONFIG_KVM_BOOK3S_HV_POSSIBLE.

This moves truly virtual-mode kvmppc_tce_to_ua() to where it belongs and
drops the prmap parameter which was never used in the virtual mode.

Fixes: d3695aa4f452 ("KVM: PPC: Add support for multiple-TCE hcalls", 2016-02-15)
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/kvm/book3s_64_vio.c
arch/powerpc/kvm/book3s_64_vio_hv.c

index df0b173da3e0d8c064a246fd014bb345a088d457..02732eb156ae1d6d2e3c9a096f6d2b130a8a6722 100644 (file)
@@ -197,8 +197,6 @@ extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
                (iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
                                (stt)->size, (ioba), (npages)) ?        \
                                H_PARAMETER : H_SUCCESS)
-extern long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
-               unsigned long *ua, unsigned long **prmap);
 extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
                unsigned long idx, unsigned long tce);
 extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
index f100e331e69b6ad37f5f6323219ada40fe8c8641..5e3b62c491f887c66f28cb0b85a5f1c7293497a3 100644 (file)
@@ -363,6 +363,22 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
        return ret;
 }
 
+static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
+               unsigned long *ua)
+{
+       unsigned long gfn = tce >> PAGE_SHIFT;
+       struct kvm_memory_slot *memslot;
+
+       memslot = search_memslots(kvm_memslots(kvm), gfn);
+       if (!memslot)
+               return -EINVAL;
+
+       *ua = __gfn_to_hva_memslot(memslot, gfn) |
+               (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
+
+       return 0;
+}
+
 static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long tce)
 {
@@ -378,7 +394,7 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
        if (iommu_tce_check_gpa(stt->page_shift, gpa))
                return H_TOO_HARD;
 
-       if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
+       if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
                return H_TOO_HARD;
 
        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
@@ -551,7 +567,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 
        dir = iommu_tce_direction(tce);
 
-       if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
+       if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
                ret = H_PARAMETER;
                goto unlock_exit;
        }
@@ -612,7 +628,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                return ret;
 
        idx = srcu_read_lock(&vcpu->kvm->srcu);
-       if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
+       if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
                ret = H_TOO_HARD;
                goto unlock_exit;
        }
@@ -647,7 +663,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                }
                tce = be64_to_cpu(tce);
 
-               if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
+               if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua))
                        return H_PARAMETER;
 
                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
index 2206bc729b9a07c4c0715af3fbabc916f7e13a72..31e59748832a3c1085f6d0395216e4fbdf83119b 100644 (file)
@@ -88,6 +88,25 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
 EXPORT_SYMBOL_GPL(kvmppc_find_table);
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
+               unsigned long *ua, unsigned long **prmap)
+{
+       unsigned long gfn = tce >> PAGE_SHIFT;
+       struct kvm_memory_slot *memslot;
+
+       memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
+       if (!memslot)
+               return -EINVAL;
+
+       *ua = __gfn_to_hva_memslot(memslot, gfn) |
+               (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
+
+       if (prmap)
+               *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
+
+       return 0;
+}
+
 /*
  * Validates TCE address.
  * At the moment flags and page mask are validated.
@@ -111,7 +130,7 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
        if (iommu_tce_check_gpa(stt->page_shift, gpa))
                return H_PARAMETER;
 
-       if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
+       if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL))
                return H_TOO_HARD;
 
        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -181,28 +200,6 @@ void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
 }
 EXPORT_SYMBOL_GPL(kvmppc_tce_put);
 
-long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
-               unsigned long *ua, unsigned long **prmap)
-{
-       unsigned long gfn = tce >> PAGE_SHIFT;
-       struct kvm_memory_slot *memslot;
-
-       memslot = search_memslots(kvm_memslots(kvm), gfn);
-       if (!memslot)
-               return -EINVAL;
-
-       *ua = __gfn_to_hva_memslot(memslot, gfn) |
-               (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
-
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-       if (prmap)
-               *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
-#endif
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(kvmppc_tce_to_ua);
-
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
                unsigned long entry, unsigned long *hpa,
@@ -390,7 +387,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                return ret;
 
        dir = iommu_tce_direction(tce);
-       if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
+       if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
                return H_PARAMETER;
 
        entry = ioba >> stt->page_shift;
@@ -492,7 +489,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                 */
                struct mm_iommu_table_group_mem_t *mem;
 
-               if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
+               if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
                        return H_TOO_HARD;
 
                mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
@@ -508,7 +505,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                 * We do not require memory to be preregistered in this case
                 * so lock rmap and do __find_linux_pte_or_hugepte().
                 */
-               if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
+               if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
                        return H_TOO_HARD;
 
                rmap = (void *) vmalloc_to_phys(rmap);
@@ -542,7 +539,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
 
                ua = 0;
-               if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
+               if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
                        return H_PARAMETER;
 
                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {