 	}
 }
+/* Flush the not-present cache if it exists */
+static void domain_flush_np_cache(struct protection_domain *domain,
+		dma_addr_t iova, size_t size)
+{
+	if (unlikely(amd_iommu_np_cache)) {
+		domain_flush_pages(domain, iova, size);
+		domain_flush_complete(domain);
+	}
+}
+
 /*
  * This function flushes the DTEs for all devices in domain
  */
 	}
 	address += offset;
 
-	if (unlikely(amd_iommu_np_cache)) {
-		domain_flush_pages(&dma_dom->domain, address, size);
-		domain_flush_complete(&dma_dom->domain);
-	}
+	domain_flush_np_cache(&dma_dom->domain, address, size);
 
 out:
 	return address;
 		s->dma_length = s->length;
 	}
 
+	domain_flush_np_cache(domain, s->dma_address, s->dma_length);
+
 	return nelems;
 
 out_unmap:
 	ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
 	mutex_unlock(&domain->api_lock);
 
+	domain_flush_np_cache(domain, iova, page_size);
+
 	return ret;
 }
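
The helper only does work on hardware where amd_iommu_np_cache is set, i.e. where
the IOMMU may cache not-present translations, so a newly created mapping is not
usable until the affected IOVA range has been invalidated. A minimal sketch of the
calling convention this patch establishes: example_map_range() below is a
hypothetical caller, not part of the patch; iommu_map_page() and the flush
routines are the driver functions used in the hunks above.

/* Hypothetical caller, for illustration only */
static int example_map_range(struct protection_domain *domain,
			     unsigned long iova, phys_addr_t paddr,
			     size_t size, int prot)
{
	int ret;

	ret = iommu_map_page(domain, iova, paddr, size, prot, GFP_KERNEL);
	if (ret)
		return ret;

	/*
	 * Previously every caller open-coded the check:
	 *
	 *	if (unlikely(amd_iommu_np_cache)) {
	 *		domain_flush_pages(domain, iova, size);
	 *		domain_flush_complete(domain);
	 *	}
	 *
	 * which domain_flush_np_cache() now centralizes.
	 */
	domain_flush_np_cache(domain, iova, size);

	return 0;
}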