F: drivers/pinctrl/pinctrl-artpec*
F: Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
----- -L: openbmc@lists.ozlabs.org
+ ARM/ASPEED I2C DRIVER
+ M: Brendan Higgins <brendanhiggins@google.com>
+ R: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ R: Joel Stanley <joel@jms.id.au>
+ L: linux-i2c@vger.kernel.org
+++++ + +L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
+ S: Maintained
+ F: drivers/irqchip/irq-aspeed-i2c-ic.c
+ F: drivers/i2c/busses/i2c-aspeed.c
+ F: Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-i2c-ic.txt
+ F: Documentation/devicetree/bindings/i2c/i2c-aspeed.txt
+
ARM/ASPEED MACHINE SUPPORT
M: Joel Stanley <joel@jms.id.au>
S: Maintained
F: drivers/staging/greybus/spilib.c
F: drivers/staging/greybus/spilib.h
----- -GREYBUS LOOBACK/TIME PROTOCOLS DRIVERS
- GREYBUS PROTOCOLS DRIVERS
- M: David Lin <dtwlin@gmail.com>
+++++ + +GREYBUS LOOPBACK/TIME PROTOCOLS DRIVERS
+ M: Bryan O'Donoghue <pure.logic@nexus-software.ie>
S: Maintained
- F: drivers/staging/greybus/uart.c
- F: drivers/staging/greybus/log.c
+ F: drivers/staging/greybus/loopback.c
+ F: drivers/staging/greybus/timesync.c
+ F: drivers/staging/greybus/timesync_platform.c
GREYBUS PLATFORM DRIVERS
M: Vaibhav Hiremath <hvaibhav.linux@gmail.com>
S: Supported
F: arch/hexagon/
+++++++ ++++++QUALCOMM IOMMU
+++++++ ++++++M: Rob Clark <robdclark@gmail.com>
+++++++ ++++++L: iommu@lists.linux-foundation.org
+++++++ ++++++L: linux-arm-msm@vger.kernel.org
+++++++ ++++++S: Maintained
+++++++ ++++++F: drivers/iommu/qcom_iommu.c
+++++++ ++++++
+ QUALCOMM VENUS VIDEO ACCELERATOR DRIVER
+ M: Stanimir Varbanov <stanimir.varbanov@linaro.org>
+ L: linux-media@vger.kernel.org
+ L: linux-arm-msm@vger.kernel.org
+ T: git git://linuxtv.org/media_tree.git
+ S: Maintained
+ F: drivers/media/platform/qcom/venus/
+
QUALCOMM WCN36XX WIRELESS DRIVER
M: Eugene Krasnikov <k.eugene.e@gmail.com>
L: wcn36xx@lists.infradead.org
F: include/linux/virtio*.h
F: include/uapi/linux/virtio_*.h
F: drivers/crypto/virtio/
+++++ + +F: mm/balloon_compaction.c
+
+ VIRTIO CRYPTO DRIVER
+ M: Gonglei <arei.gonglei@huawei.com>
+ L: virtualization@lists.linux-foundation.org
+ L: linux-crypto@vger.kernel.org
+ S: Maintained
+ F: drivers/crypto/virtio/
+ F: include/uapi/linux/virtio_crypto.h
VIRTIO DRIVERS FOR S390
- M: Cornelia Huck <cornelia.huck@de.ibm.com>
+ M: Cornelia Huck <cohuck@redhat.com>
M: Halil Pasic <pasic@linux.vnet.ibm.com>
L: linux-s390@vger.kernel.org
L: virtualization@lists.linux-foundation.org
static const struct dma_map_ops amd_iommu_dma_ops;
----------- --/*
----------- -- * This struct contains device specific data for the IOMMU
----------- -- */
----------- --struct iommu_dev_data {
----------- -- struct list_head list; /* For domain->dev_list */
----------- -- struct list_head dev_data_list; /* For global dev_data_list */
----------- -- struct protection_domain *domain; /* Domain the device is bound to */
----------- -- u16 devid; /* PCI Device ID */
----------- -- u16 alias; /* Alias Device ID */
----------- -- bool iommu_v2; /* Device can make use of IOMMUv2 */
----------- -- bool passthrough; /* Device is identity mapped */
----------- -- struct {
----------- -- bool enabled;
----------- -- int qdep;
----------- -- } ats; /* ATS state */
----------- -- bool pri_tlp; /* PASID TLB required for
----------- -- PPR completions */
----------- -- u32 errata; /* Bitmap for errata to apply */
----------- -- bool use_vapic; /* Enable device to use vapic mode */
---------- --
---------- -- struct ratelimit_state rs; /* Ratelimit IOPF messages */
----------- --};
----------- --
/*
* general struct to manage commands send to an IOMMU
*/
static void update_domain(struct protection_domain *domain);
static int protection_domain_init(struct protection_domain *domain);
static void detach_device(struct device *dev);
------ --- --
------ --- -- #define FLUSH_QUEUE_SIZE 256
------ --- --
------ --- -- struct flush_queue_entry {
------ --- -- unsigned long iova_pfn;
------ --- -- unsigned long pages;
------ --- -- u64 counter; /* Flush counter when this entry was added to the queue */
------ --- -- };
------ --- --
------ --- -- struct flush_queue {
------ --- -- struct flush_queue_entry *entries;
------ --- -- unsigned head, tail;
------ --- -- spinlock_t lock;
------ --- -- };
++++++ ++++++ static void iova_domain_flush_tlb(struct iova_domain *iovad);
/*
* Data container for a dma_ops specific protection domain
free_page((unsigned long)domain->gcr3_tbl);
}
------ --- -- static void dma_ops_domain_free_flush_queue(struct dma_ops_domain *dom)
------ --- -- {
------ --- -- int cpu;
------ --- --
------ --- -- for_each_possible_cpu(cpu) {
------ --- -- struct flush_queue *queue;
------ --- --
------ --- -- queue = per_cpu_ptr(dom->flush_queue, cpu);
------ --- -- kfree(queue->entries);
------ --- -- }
------ --- --
------ --- -- free_percpu(dom->flush_queue);
------ --- --
------ --- -- dom->flush_queue = NULL;
------ --- -- }
------ --- --
------ --- -- static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom)
------ --- -- {
------ --- -- int cpu;
------ --- --
------ --- -- atomic64_set(&dom->flush_start_cnt, 0);
------ --- -- atomic64_set(&dom->flush_finish_cnt, 0);
------ --- --
------ --- -- dom->flush_queue = alloc_percpu(struct flush_queue);
------ --- -- if (!dom->flush_queue)
------ --- -- return -ENOMEM;
------ --- --
------ --- -- /* First make sure everything is cleared */
------ --- -- for_each_possible_cpu(cpu) {
------ --- -- struct flush_queue *queue;
------ --- --
------ --- -- queue = per_cpu_ptr(dom->flush_queue, cpu);
------ --- -- queue->head = 0;
------ --- -- queue->tail = 0;
------ --- -- queue->entries = NULL;
------ --- -- }
------ --- --
------ --- -- /* Now start doing the allocation */
------ --- -- for_each_possible_cpu(cpu) {
------ --- -- struct flush_queue *queue;
------ --- --
------ --- -- queue = per_cpu_ptr(dom->flush_queue, cpu);
------ --- -- queue->entries = kzalloc(FLUSH_QUEUE_SIZE * sizeof(*queue->entries),
------ --- -- GFP_KERNEL);
------ --- -- if (!queue->entries) {
------ --- -- dma_ops_domain_free_flush_queue(dom);
------ --- -- return -ENOMEM;
------ --- -- }
------ --- --
------ --- -- spin_lock_init(&queue->lock);
------ --- -- }
------ --- --
------ --- -- return 0;
------ --- -- }
------ --- --
+ static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom)
+ {
------ --- -- atomic64_inc(&dom->flush_start_cnt);
+ domain_flush_tlb(&dom->domain);
+ domain_flush_complete(&dom->domain);
------ --- -- atomic64_inc(&dom->flush_finish_cnt);
------ --- - }
------ --- -
------ --- - static inline bool queue_ring_full(struct flush_queue *queue)
------ --- - {
------ --- - assert_spin_locked(&queue->lock);
------ --- -
------ --- - return (((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head);
---- }
----
---- #define queue_ring_for_each(i, q) \
---- for (i = (q)->head; i != (q)->tail; i = (i + 1) % FLUSH_QUEUE_SIZE)
----
---- static inline unsigned queue_ring_add(struct flush_queue *queue)
---- {
---- unsigned idx = queue->tail;
----
---- assert_spin_locked(&queue->lock);
---- queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE;
----
---- return idx;
---- }
----
---- static inline void queue_ring_remove_head(struct flush_queue *queue)
---- {
---- assert_spin_locked(&queue->lock);
---- queue->head = (queue->head + 1) % FLUSH_QUEUE_SIZE;
---- }
----
---- static void queue_ring_free_flushed(struct dma_ops_domain *dom,
---- struct flush_queue *queue)
---- {
---- u64 counter = atomic64_read(&dom->flush_finish_cnt);
---- int idx;
----
---- queue_ring_for_each(idx, queue) {
---- /*
---- * This assumes that counter values in the ring-buffer are
---- * monotonously rising.
---- */
---- if (queue->entries[idx].counter >= counter)
---- break;
----
---- free_iova_fast(&dom->iovad,
---- queue->entries[idx].iova_pfn,
---- queue->entries[idx].pages);
----
---- queue_ring_remove_head(queue);
---- }
---- }
----
---- static void queue_add(struct dma_ops_domain *dom,
---- unsigned long address, unsigned long pages)
---- {
---- struct flush_queue *queue;
---- unsigned long flags;
---- int idx;
----
---- pages = __roundup_pow_of_two(pages);
---- address >>= PAGE_SHIFT;
----
---- queue = get_cpu_ptr(dom->flush_queue);
---- spin_lock_irqsave(&queue->lock, flags);
----
---- /*
---- * First remove the enries from the ring-buffer that are already
---- * flushed to make the below queue_ring_full() check less likely
---- */
---- queue_ring_free_flushed(dom, queue);
----
---- /*
---- * When ring-queue is full, flush the entries from the IOTLB so
---- * that we can free all entries with queue_ring_free_flushed()
---- * below.
---- */
---- if (queue_ring_full(queue)) {
---- dma_ops_domain_flush_tlb(dom);
---- queue_ring_free_flushed(dom, queue);
---- }
----
---- idx = queue_ring_add(queue);
----
---- queue->entries[idx].iova_pfn = address;
---- queue->entries[idx].pages = pages;
---- queue->entries[idx].counter = atomic64_read(&dom->flush_start_cnt);
----
---- spin_unlock_irqrestore(&queue->lock, flags);
----
---- if (atomic_cmpxchg(&dom->flush_timer_on, 0, 1) == 0)
---- mod_timer(&dom->flush_timer, jiffies + msecs_to_jiffies(10));
----
---- put_cpu_ptr(dom->flush_queue);
+ }
+
---- static void queue_flush_timeout(unsigned long data)
++++++ ++++++ static void iova_domain_flush_tlb(struct iova_domain *iovad)
+ {
---- struct dma_ops_domain *dom = (struct dma_ops_domain *)data;
---- int cpu;
++++++ ++++++ struct dma_ops_domain *dom;
+
------ --- -- atomic_set(&dom->flush_timer_on, 0);
++++++ ++++++ dom = container_of(iovad, struct dma_ops_domain, iovad);
+
+ dma_ops_domain_flush_tlb(dom);
------ --- --
------ --- -- for_each_possible_cpu(cpu) {
------ --- -- struct flush_queue *queue;
------ --- -- unsigned long flags;
------ --- --
------ --- -- queue = per_cpu_ptr(dom->flush_queue, cpu);
------ --- -- spin_lock_irqsave(&queue->lock, flags);
------ --- -- queue_ring_free_flushed(dom, queue);
------ --- -- spin_unlock_irqrestore(&queue->lock, flags);
------ --- -- }
+ }
+
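For context on the function added above: the removed queue_add()/queue_flush_timeout() ring-buffer machinery is superseded by the generic IOVA flush queue, so the unmap path is expected to defer IOVA freeing through queue_iova() instead of the driver-local queue, with iova_domain_flush_tlb() invoked by that queue before the PFNs are recycled. A minimal sketch under that assumption (the wrapper name below is illustrative, not taken from this diff; queue_iova() is assumed to have the include/linux/iova.h prototype of this kernel cycle):

	/* Sketch only: hand a freed range to the generic flush queue instead of
	 * the removed queue_add(). defer_iova_free() is a hypothetical wrapper,
	 * mirroring the rounding the old code did before queueing. */
	static void defer_iova_free(struct dma_ops_domain *dma_dom,
				    unsigned long address, unsigned long pages)
	{
		pages = __roundup_pow_of_two(pages);
		queue_iova(&dma_dom->iovad, address >> PAGE_SHIFT, pages, 0);
	}
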
/*
* Free a domain, only used if something went wrong in the
* allocation path and we need to free an already allocated page table
init_iova_domain(&dma_dom->iovad, PAGE_SIZE,
IOVA_START_PFN, DMA_32BIT_PFN);
------ --- -- /* Initialize reserved ranges */
------ --- -- copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);
------ --- --
------ --- -- if (dma_ops_domain_alloc_flush_queue(dma_dom))
++++++ ++++++ if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))
+ goto free_dma_dom;
+
------ --- -- setup_timer(&dma_dom->flush_timer, queue_flush_timeout,
------ --- -- (unsigned long)dma_dom);
------ --- --
------ --- -- atomic_set(&dma_dom->flush_timer_on, 0);
++++++ +++ ++ /* Initialize reserved ranges */
++++++ +++ ++ copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);
add_domain_to_list(&dma_dom->domain);
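
The allocation hunk above drops dma_ops_domain_alloc_flush_queue() and the per-domain timer in favour of a single init_iova_flush_queue() call. For reference, the generic interface this relies on (prototypes as assumed from include/linux/iova.h around this kernel cycle) is roughly:

	/* Assumed prototypes from include/linux/iova.h; shown only to make the
	 * callback wiring explicit. */
	typedef void (*iova_flush_cb)(struct iova_domain *iovad);
	typedef void (*iova_entry_dtor)(unsigned long data);

	int init_iova_flush_queue(struct iova_domain *iovad,
				  iova_flush_cb flush_cb,
				  iova_entry_dtor entry_dtor);

The driver passes iova_domain_flush_tlb() as flush_cb and NULL as entry_dtor, since the deferred entries carry no per-entry payload; the per-CPU queues, flush counters and timer that the removed code managed by hand are expected to live inside the generic implementation and to be torn down with put_iova_domain().
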
flags |= tmp;
}
---------- --
---------- -- flags &= ~(DTE_FLAG_SA | 0xffffULL);
- flags &= ~(0xffffUL);
+++++++++++ ++ flags &= ~DEV_DOMID_MASK;
flags |= domain->id;
amd_iommu_dev_table[devid].data[1] = flags;
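
In the final hunk the merge keeps the named constant over the two open-coded masks. Assuming the usual definition in amd_iommu_types.h, this matches the removed "flags &= ~(0xffffUL)" variant:

	/* Assumed definition (amd_iommu_types.h): the domain ID occupies the low
	 * 16 bits of DTE data[1], so it is cleared before OR-ing in domain->id. */
	#define DEV_DOMID_MASK		0xffffULL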