dev_data->alias_data = alias_data;
dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
- } else
+ }
+
+ if (dma_pdev == NULL)
dma_pdev = pci_dev_get(pdev);
+ /* Account for quirked devices */
swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
+ /*
+ * If it's a multifunction device that does not support our
+ * required ACS flags, add to the same group as function 0.
+ */
if (dma_pdev->multifunction &&
!pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
swap_pci_ref(&dma_pdev,
*/
struct amd_iommu **amd_iommu_rlookup_table;
+ /*
+ * This table is used to find the irq remapping table for a given device id
+ * quickly.
+ */
+ struct irq_remap_table **irq_lookup_table;
+
/*
- * AMD IOMMU allows up to 2^16 differend protection domains. This is a bitmap
+ * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
* to know which ones are already in use.
*/
unsigned long *amd_iommu_pd_alloc_bitmap;
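For context, amd_iommu_pd_alloc_bitmap is what the domain-id allocator scans whenever a new protection domain is created. The following is a minimal illustrative sketch of that pattern, not code from this patch; it assumes MAX_DOMAIN_ID (2^16) from amd_iommu_types.h and leaves out the locking the driver takes around the bitmap.

/* Illustrative sketch: allocate an unused protection domain id from the
 * bitmap declared above. Simplified -- no locking, no sanity BUG_ONs. */
static u16 sketch_domain_id_alloc(void)
{
	int id;

	/* find the first id not yet marked as in use ... */
	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
	if (id > 0 && id < MAX_DOMAIN_ID)
		__set_bit(id, amd_iommu_pd_alloc_bitmap);	/* ... and claim it */
	else
		id = 0;	/* 0 signals that no domain id is available */

	return id;
}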
set_iommu_for_device(iommu, devid);
}
+ static int add_special_device(u8 type, u8 id, u16 devid)
+ {
+ struct devid_map *entry;
+ struct list_head *list;
+
+ if (type != IVHD_SPECIAL_IOAPIC && type != IVHD_SPECIAL_HPET)
+ return -EINVAL;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->id = id;
+ entry->devid = devid;
+
+ if (type == IVHD_SPECIAL_IOAPIC)
+ list = &ioapic_map;
+ else
+ list = &hpet_map;
+
+ list_add_tail(&entry->list, list);
+
+ return 0;
+ }
+
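The ioapic_map and hpet_map lists filled by add_special_device() are later consulted to translate an IOAPIC or HPET id into the PCI device id the IOMMU expects. A minimal lookup sketch follows; the helper name and the -ENODEV convention are assumptions for illustration, not necessarily this patch's own helper.

/* Illustrative sketch: resolve the devid recorded for a given IOAPIC id. */
static int sketch_get_ioapic_devid(u8 id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &ioapic_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -ENODEV;	/* no IVHD special-device entry matched */
}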
/*
- * Reads the device exclusion range from ACPI and initialize IOMMU with
+ * Reads the device exclusion range from ACPI and initializes the IOMMU with
* it
*/
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
if (iommu_feature(iommu, (1ULL << i)))
pr_cont(" %s", feat_str[i]);
}
- }
pr_cont("\n");
+ }
}
+ if (irq_remapping_enabled)
+ pr_info("AMD-Vi: Interrupt remapping enabled\n");
}
static int __init amd_iommu_init_pci(void)
early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+ /* Make sure ACS will be enabled during PCI probe */
+ pci_request_acs();
+
+ if (!disable_irq_remap)
+ amd_iommu_irq_remap = true;
+
return true;
}
return ret;
}
+ #ifdef CONFIG_IRQ_REMAP
+ int __init amd_iommu_prepare(void)
+ {
+ return iommu_go_to_state(IOMMU_ACPI_FINISHED);
+ }
+
+ int __init amd_iommu_supported(void)
+ {
+ return amd_iommu_irq_remap ? 1 : 0;
+ }
+
+ int __init amd_iommu_enable(void)
+ {
+ int ret;
+
+ ret = iommu_go_to_state(IOMMU_ENABLED);
+ if (ret)
+ return ret;
+
+ irq_remapping_enabled = 1;
+
+ return 0;
+ }
+
+ void amd_iommu_disable(void)
+ {
+ amd_iommu_suspend();
+ }
+
+ int amd_iommu_reenable(int mode)
+ {
+ amd_iommu_resume();
+
+ return 0;
+ }
+
+ int __init amd_iommu_enable_faulting(void)
+ {
+ /* We enable MSI later when PCI is initialized */
+ return 0;
+ }
+ #endif
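The CONFIG_IRQ_REMAP block above provides the entry points the generic interrupt-remapping layer drives at boot (supported/prepare/enable) and across suspend/resume (disable/reenable). Hooking them up would look roughly like the sketch below; the ops structure and field names are assumed from the irq remapping layer of that kernel generation, so treat this as orientation rather than this patch's literal wiring.

/* Illustrative sketch: exposing the callbacks above through an ops table
 * so the generic irq-remapping core can drive the AMD IOMMU. */
struct irq_remap_ops amd_iommu_irq_ops = {
	.supported		= amd_iommu_supported,
	.prepare		= amd_iommu_prepare,
	.enable			= amd_iommu_enable,
	.disable		= amd_iommu_disable,
	.reenable		= amd_iommu_reenable,
	.enable_faulting	= amd_iommu_enable_faulting,
};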
/*
* This is the core init function for AMD IOMMU hardware in the system.