if (!its->is_v4)
continue;
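+ /*
+ * Skip any ITS that doesn't currently hold a VLPI for this
+ * VM: there is nothing for it to move.
+ */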
+ if (!vpe->its_vm->vlpi_count[its->list_nr])
+ continue;
+
desc.its_vmovp_cmd.col = &its->collections[col_id];
its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
}
return 0;
}
+static void its_map_vm(struct its_node *its, struct its_vm *vm)
+{
+ unsigned long flags;
+
+ /* Not using the ITS list? Everything is always mapped. */
+ if (!its_list_map)
+ return;
+
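+ /*
+ * vmovp_lock guards the per-ITS vlpi_count refcounts against
+ * concurrent VMOVP emission.
+ */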
+ raw_spin_lock_irqsave(&vmovp_lock, flags);
+
+ /*
+ * Bump the VM's per-ITS reference count. If this is the first
+ * VLPI mapped through this ITS, the VM wasn't mapped here yet:
+ * iterate over the vpes and get them mapped now.
+ */
+ vm->vlpi_count[its->list_nr]++;
+
+ if (vm->vlpi_count[its->list_nr] == 1) {
+ int i;
+
+ for (i = 0; i < vm->nr_vpes; i++) {
+ struct its_vpe *vpe = vm->vpes[i];
+
+ /* Map the VPE to the first possible CPU */
+ vpe->col_idx = cpumask_first(cpu_online_mask);
+ its_send_vmapp(its, vpe, true);
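+ /* Invalidate any cached state for the newly mapped VPE */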
+ its_send_vinvall(its, vpe);
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
+
+static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
+{
+ unsigned long flags;
+
+ /* Not using the ITS list? Everything is always mapped. */
+ if (!its_list_map)
+ return;
+
+ raw_spin_lock_irqsave(&vmovp_lock, flags);
+
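+ /* Last VLPI for this VM on this ITS? Unmap all of its VPEs. */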
+ if (!--vm->vlpi_count[its->list_nr]) {
+ int i;
+
+ for (i = 0; i < vm->nr_vpes; i++)
+ its_send_vmapp(its, vm->vpes[i], false);
+ }
+
+ raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
+
static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
/* Already mapped, move it around */
its_send_vmovi(its_dev, event);
} else {
+ /* Ensure all the VPEs are mapped on this ITS */
+ its_map_vm(its_dev->its, info->map->vm);
+
/* Drop the physical mapping */
its_send_discard(its_dev, event);
LPI_PROP_ENABLED |
LPI_PROP_GROUP1));
+ /* Potentially unmap the VM from this ITS */
+ its_unmap_vm(its_dev->its, its_dev->event_map.vm);
+
/*
* Drop the refcount and make the device available again if
* this was the last VLPI.
if (!its->is_v4)
continue;
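+ /*
+ * When using the ITS list, VPEs are only mapped on the ITSs
+ * that hold VLPIs for this VM; skip the others.
+ */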
+ if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
+ continue;
+
its_send_vinvall(its, vpe);
}
}
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
struct its_node *its;
+ /* If we use the list map, we issue VMAPP on demand... */
+ if (its_list_map)
+ return 0;
+
/* Map the VPE to the first possible CPU */
vpe->col_idx = cpumask_first(cpu_online_mask);
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
struct its_node *its;
+ /*
+ * If we use the list map, we unmap the VPE once no VLPIs are
+ * associated with the VM.
+ */
+ if (its_list_map)
+ return;
+
list_for_each_entry(its, &its_nodes, entry) {
if (!its->is_v4)
continue;