XIVE native exploitation interrupt mode. If not, it should run using
the legacy interrupt mode, referred to as XICS (POWER7/8).
+* Device Mappings
+
+ The KVM device exposes different MMIO ranges of the XIVE HW which
+ are required for interrupt management. These are exposed to the
+ guest in VMAs populated with a custom VM fault handler.
+
+ 1. Thread Interrupt Management Area (TIMA)
+
+ Each thread has an associated Thread Interrupt Management context
+ composed of a set of registers. These registers let the thread
+ handle priority management and interrupt acknowledgment. The most
+  important are:
+
+ - Interrupt Pending Buffer (IPB)
+ - Current Processor Priority (CPPR)
+ - Notification Source Register (NSR)
+
+  They are exposed to software in four different pages, each offering
+  a view at a different privilege level. The first page is for the
+  physical thread context and the second for the hypervisor. Only the
+  third (operating system) and the fourth (user level) are exposed to
+  the guest.
+
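+  A minimal sketch of how userspace could map this window (the names
+  xive_fd and map_tima_os are illustrative, not part of the API;
+  xive_fd stands for the file descriptor returned by KVM_CREATE_DEVICE
+  for the XIVE device, and KVM_XIVE_TIMA_PAGE_OFFSET comes from the
+  uapi headers):
+
+      #include <linux/kvm.h>
+      #include <sys/mman.h>
+      #include <unistd.h>
+
+      /* Map the 4-page TIMA window and return the OS view (page 2) */
+      void *map_tima_os(int xive_fd)
+      {
+              long page_size = sysconf(_SC_PAGESIZE);
+              void *tima = mmap(NULL, 4 * page_size,
+                                PROT_READ | PROT_WRITE, MAP_SHARED,
+                                xive_fd,
+                                KVM_XIVE_TIMA_PAGE_OFFSET * page_size);
+
+              if (tima == MAP_FAILED)
+                      return NULL;
+
+              /* HW and HV views (pages 0 and 1) fault if touched */
+              return (char *)tima + 2 * page_size;
+      }
+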
* Groups:
1. KVM_DEV_XIVE_GRP_CTRL
* same offset regardless of where the code is executing
*/
extern void __iomem *xive_tima;
+extern unsigned long xive_tima_os;
/*
* Offset in the TM area of our current execution level (provided by
#define KVM_XIVE_EQ_ALWAYS_NOTIFY 0x00000001
+#define KVM_XIVE_TIMA_PAGE_OFFSET 0
+
#endif /* __LINUX_KVM_POWERPC_H */
return rc;
}
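+/*
+ * Fault handler for the TIMA mapping. The window has four pages, one
+ * per view of the thread interrupt context: HW (0), HV (1), OS (2)
+ * and USER (3). Only the OS view is backed for the guest.
+ */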
+static vm_fault_t xive_native_tima_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+
+ switch (vmf->pgoff - vma->vm_pgoff) {
+ case 0: /* HW - forbid access */
+ case 1: /* HV - forbid access */
+ return VM_FAULT_SIGBUS;
+ case 2: /* OS */
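+		/* The PTE is installed directly; NOPAGE reports the fault as handled */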
+ vmf_insert_pfn(vma, vmf->address, xive_tima_os >> PAGE_SHIFT);
+ return VM_FAULT_NOPAGE;
+ case 3: /* USER - TODO */
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+static const struct vm_operations_struct xive_native_tima_vmops = {
+ .fault = xive_native_tima_fault,
+};
+
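+/*
+ * mmap handler of the device: only the TIMA window, 4 pages at the
+ * fixed offset KVM_XIVE_TIMA_PAGE_OFFSET, can be mapped for now.
+ */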
+static int kvmppc_xive_native_mmap(struct kvm_device *dev,
+ struct vm_area_struct *vma)
+{
+ /* We only allow mappings at fixed offset for now */
+ if (vma->vm_pgoff == KVM_XIVE_TIMA_PAGE_OFFSET) {
+ if (vma_pages(vma) > 4)
+ return -EINVAL;
+ vma->vm_ops = &xive_native_tima_vmops;
+ } else {
+ return -EINVAL;
+ }
+
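+	/* MMIO mapping: no struct page behind it, cache-inhibited access */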
+ vma->vm_flags |= VM_IO | VM_PFNMAP;
+ vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
+ return 0;
+}
+
static int kvmppc_xive_native_set_source(struct kvmppc_xive *xive, long irq,
u64 addr)
{
.set_attr = kvmppc_xive_native_set_attr,
.get_attr = kvmppc_xive_native_get_attr,
.has_attr = kvmppc_xive_native_has_attr,
+ .mmap = kvmppc_xive_native_mmap,
};
void kvmppc_xive_native_init_module(void)
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);
+unsigned long xive_tima_os;
+EXPORT_SYMBOL_GPL(xive_tima_os);
+
bool __init xive_native_init(void)
{
struct device_node *np;
for_each_possible_cpu(cpu)
kvmppc_set_xive_tima(cpu, r.start, tima);
+ /* Resource 2 is OS window */
+ if (of_address_to_resource(np, 2, &r)) {
+ pr_err("Failed to get thread mgmnt area resource\n");
+ return false;
+ }
+
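+	/* Physical base of the OS TIMA page, used by the TIMA fault handler */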
+ xive_tima_os = r.start;
+
	/* Grab size of provisioning pages */
xive_parse_provisioning(np);