From c03ab37cbe1db0ec9186d8de04dd3801c0af0fba Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Sun, 21 Oct 2007 16:42:00 -0700
Subject: [PATCH] intel-iommu sg chaining support

x86_64 defines ARCH_HAS_SG_CHAIN. So if IOMMU implementations don't
support sg chaining, we will get data corruption.

Signed-off-by: FUJITA Tomonori
Acked-by: Anil S Keshavamurthy
Cc: Jens Axboe
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 drivers/pci/intel-iommu.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 8f372c8f3d87..b3d70310af49 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1963,7 +1963,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size,
 }
 
 #define SG_ENT_VIRT_ADDRESS(sg)	(page_address((sg)->page) + (sg)->offset)
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg,
+static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	int nelems, int dir)
 {
 	int i;
@@ -1973,16 +1973,17 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg,
 	struct iova *iova;
 	size_t size = 0;
 	void *addr;
+	struct scatterlist *sg;
 
 	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
 		return;
 	domain = find_domain(pdev);
 
-	iova = find_iova(&domain->iovad, IOVA_PFN(sg[0].dma_address));
+	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
 	if (!iova)
 		return;
 
-	for (i = 0; i < nelems; i++, sg++) {
+	for_each_sg(sglist, sg, nelems, i) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		size += aligned_size((u64)addr, sg->length);
 	}
@@ -2003,21 +2004,21 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg,
 }
 
 static int intel_nontranslate_map_sg(struct device *hddev,
-	struct scatterlist *sg, int nelems, int dir)
+	struct scatterlist *sglist, int nelems, int dir)
 {
 	int i;
+	struct scatterlist *sg;
 
-	for (i = 0; i < nelems; i++) {
-		struct scatterlist *s = &sg[i];
-		BUG_ON(!s->page);
-		s->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(s));
-		s->dma_length = s->length;
+	for_each_sg(sglist, sg, nelems, i) {
+		BUG_ON(!sg->page);
+		sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
+		sg->dma_length = sg->length;
 	}
 	return nelems;
 }
 
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
-	int nelems, int dir)
+static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
+		int nelems, int dir)
 {
 	void *addr;
 	int i;
@@ -2028,18 +2029,18 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
 	size_t offset = 0;
 	struct iova *iova = NULL;
 	int ret;
-	struct scatterlist *orig_sg = sg;
+	struct scatterlist *sg;
 	unsigned long start_addr;
 
 	BUG_ON(dir == DMA_NONE);
 	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
-		return intel_nontranslate_map_sg(hwdev, sg, nelems, dir);
+		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
 
 	domain = get_valid_domain_for_dev(pdev);
 	if (!domain)
 		return 0;
 
-	for (i = 0; i < nelems; i++, sg++) {
+	for_each_sg(sglist, sg, nelems, i) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		addr = (void *)virt_to_phys(addr);
 		size += aligned_size((u64)addr, sg->length);
@@ -2047,7 +2048,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
 
 	iova = __intel_alloc_iova(hwdev, domain, size);
 	if (!iova) {
-		orig_sg->dma_length = 0;
+		sglist->dma_length = 0;
 		return 0;
 	}
 
@@ -2063,8 +2064,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
 
 	start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
 	offset = 0;
-	sg = orig_sg;
-	for (i = 0; i < nelems; i++, sg++) {
+	for_each_sg(sglist, sg, nelems, i) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		addr = (void *)virt_to_phys(addr);
 		size = aligned_size((u64)addr, sg->length);
-- 
2.30.2
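
Note (not part of the patch): below is a minimal, self-contained sketch of
why the plain "sg++" walk breaks once scatterlists can be chained, which is
the corruption the commit message refers to. The struct layout and the
sg_next() helper are simplified, hypothetical stand-ins for the kernel's
real scatterlist code, not the actual implementation.

#include <stdio.h>

/* Hypothetical, simplified model of a scatterlist entry. */
struct sg {
	unsigned int length;
	struct sg *chain;	/* set only on a chain entry */
	int is_last;		/* set only on the final entry */
};

/* Stand-in for a real sg_next(): step forward, following chain links. */
static struct sg *sg_next(struct sg *sg)
{
	struct sg *next;

	if (sg->is_last)
		return NULL;
	next = sg + 1;
	if (next->chain)	/* chain entry: jump to the next chunk */
		next = next->chain;
	return next;
}

int main(void)
{
	struct sg chunk2[2] = {
		{ .length = 512 },
		{ .length = 1024, .is_last = 1 },
	};
	/* First chunk: one data entry, then a chain entry pointing at chunk2. */
	struct sg chunk1[2] = {
		{ .length = 4096 },
		{ .chain = chunk2 },
	};
	struct sg *sg;
	int i, nelems = 3;

	/*
	 * for_each_sg()-style walk: follows the chain link and visits the
	 * three data entries (4096, 512, 1024).  A plain "sg++" walk over
	 * chunk1 would instead step into the chain entry and then off the
	 * end of the array, reading garbage lengths and addresses.
	 */
	for (i = 0, sg = chunk1; i < nelems; i++, sg = sg_next(sg))
		printf("entry %d: length %u\n", i, sg->length);

	return 0;
}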