arm64: alternatives: use tpidr_el2 on VHE hosts
author	James Morse <james.morse@arm.com>
Mon, 8 Jan 2018 15:38:06 +0000 (15:38 +0000)
committer	Catalin Marinas <catalin.marinas@arm.com>
Sat, 13 Jan 2018 10:44:33 +0000 (10:44 +0000)
Now that KVM uses tpidr_el2 in the same way as Linux's cpu_offset in
tpidr_el1, merge the two. This saves KVM from having to save/restore
tpidr_el1 on VHE hosts, and allows future code to blindly access per-cpu
variables without triggering a world-switch.
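
As a rough sketch of the resulting mechanism (illustration only, not part
of this patch; example_cpu_offset() is a made-up name and header names are
assumed from this tree), the host's per-cpu offset read now takes the shape
below, with ALTERNATIVE() rewriting the mrs instruction at boot once
ARM64_HAS_VIRT_HOST_EXTN is detected. It is a simplified version of
__my_cpu_offset() as changed in asm/percpu.h; the same alternative is
applied to the assembly helpers and to cpu_do_suspend()/cpu_do_resume().

    #include <asm/alternative.h>
    #include <asm/cpufeature.h>

    /* Illustrative only: simplified __my_cpu_offset() after this patch. */
    static inline unsigned long example_cpu_offset(void)
    {
            unsigned long off;

            /*
             * The default encoding reads tpidr_el1; on a VHE host the
             * alternatives framework patches it to read tpidr_el2, where
             * the kernel (running at EL2) now keeps its cpu offset.
             */
            asm(ALTERNATIVE("mrs %0, tpidr_el1",
                            "mrs %0, tpidr_el2",
                            ARM64_HAS_VIRT_HOST_EXTN)
                : "=r" (off));

            return off;
    }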

Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Christoffer Dall <cdall@linaro.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/alternative.h
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/percpu.h
arch/arm64/kernel/alternative.c
arch/arm64/kernel/cpufeature.c
arch/arm64/mm/proc.S

index 4a85c6952a221ec8e02df17b39b09ddf8f9c13d4..669028172fd6563ee5043841a5ecfebd928fb2da 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/stddef.h>
 #include <linux/stringify.h>
 
+extern int alternatives_applied;
+
 struct alt_instr {
        s32 orig_offset;        /* offset to original instruction */
        s32 alt_offset;         /* offset to replacement instruction */
index a6f90b648655777f41239392522402274fd37b9c..5dc4856f3bb99e70fa52c044545379dc7ef3a809 100644 (file)
@@ -254,7 +254,11 @@ lr .req    x30             // link register
 #else
        adr_l   \dst, \sym
 #endif
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
        mrs     \tmp, tpidr_el1
+alternative_else
+       mrs     \tmp, tpidr_el2
+alternative_endif
        add     \dst, \dst, \tmp
        .endm
 
@@ -265,7 +269,11 @@ lr .req    x30             // link register
         */
        .macro ldr_this_cpu dst, sym, tmp
        adr_l   \dst, \sym
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
        mrs     \tmp, tpidr_el1
+alternative_else
+       mrs     \tmp, tpidr_el2
+alternative_endif
        ldr     \dst, [\dst, \tmp]
        .endm
 
index 3bd498e4de4cf298c8a24bb5e97e235ebed17245..43393208229eb8d64ec476f5b7b098bc73f498f9 100644 (file)
 #ifndef __ASM_PERCPU_H
 #define __ASM_PERCPU_H
 
+#include <asm/alternative.h>
 #include <asm/stack_pointer.h>
 
 static inline void set_my_cpu_offset(unsigned long off)
 {
-       asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
+       asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
+                                "msr tpidr_el2, %0",
+                                ARM64_HAS_VIRT_HOST_EXTN)
+                       :: "r" (off) : "memory");
 }
 
 static inline unsigned long __my_cpu_offset(void)
@@ -31,7 +35,10 @@ static inline unsigned long __my_cpu_offset(void)
         * We want to allow caching the value, so avoid using volatile and
         * instead use a fake stack read to hazard against barrier().
         */
-       asm("mrs %0, tpidr_el1" : "=r" (off) :
+       asm(ALTERNATIVE("mrs %0, tpidr_el1",
+                       "mrs %0, tpidr_el2",
+                       ARM64_HAS_VIRT_HOST_EXTN)
+               : "=r" (off) :
                "Q" (*(const unsigned long *)current_stack_pointer));
 
        return off;
index 6dd0a3a3e5c98d447f7017c70b328a70bb82c9d3..414288a558c8e1fae52d9d39eb2b8d7991bd43c2 100644 (file)
@@ -32,6 +32,8 @@
 #define ALT_ORIG_PTR(a)                __ALT_PTR(a, orig_offset)
 #define ALT_REPL_PTR(a)                __ALT_PTR(a, alt_offset)
 
+int alternatives_applied;
+
 struct alt_region {
        struct alt_instr *begin;
        struct alt_instr *end;
@@ -143,7 +145,6 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
  */
 static int __apply_alternatives_multi_stop(void *unused)
 {
-       static int patched = 0;
        struct alt_region region = {
                .begin  = (struct alt_instr *)__alt_instructions,
                .end    = (struct alt_instr *)__alt_instructions_end,
@@ -151,14 +152,14 @@ static int __apply_alternatives_multi_stop(void *unused)
 
        /* We always have a CPU 0 at this point (__init) */
        if (smp_processor_id()) {
-               while (!READ_ONCE(patched))
+               while (!READ_ONCE(alternatives_applied))
                        cpu_relax();
                isb();
        } else {
-               BUG_ON(patched);
+               BUG_ON(alternatives_applied);
                __apply_alternatives(&region, true);
                /* Barriers provided by the cache flushing */
-               WRITE_ONCE(patched, 1);
+               WRITE_ONCE(alternatives_applied, 1);
        }
 
        return 0;
index da6722db50b0e1d76de652c3064c863c7a4b9bf5..9ef84d0def9aed482b556ebe339fd6213606d9cf 100644 (file)
@@ -886,6 +886,22 @@ static int __init parse_kpti(char *str)
 __setup("kpti=", parse_kpti);
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
+static int cpu_copy_el2regs(void *__unused)
+{
+       /*
+        * Copy register values that aren't redirected by hardware.
+        *
+        * Before code patching, we only set tpidr_el1, so all CPUs need to
+        * copy this value to tpidr_el2 before we patch the code. Once we've
+        * done that, freshly-onlined CPUs will set tpidr_el2, so we don't
+        * need to do anything here.
+        */
+       if (!alternatives_applied)
+               write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
+
+       return 0;
+}
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                .desc = "GIC system register CPU interface",
@@ -955,6 +971,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .capability = ARM64_HAS_VIRT_HOST_EXTN,
                .def_scope = SCOPE_SYSTEM,
                .matches = runs_at_el2,
+               .enable = cpu_copy_el2regs,
        },
        {
                .desc = "32-bit EL0 Support",
index bc86f7ef86204197ca7f70d3e6a6606b1ef3048e..5a59eea49395bed3021e376f9f18dee91fe867ce 100644 (file)
@@ -70,7 +70,11 @@ ENTRY(cpu_do_suspend)
        mrs     x8, mdscr_el1
        mrs     x9, oslsr_el1
        mrs     x10, sctlr_el1
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
        mrs     x11, tpidr_el1
+alternative_else
+       mrs     x11, tpidr_el2
+alternative_endif
        mrs     x12, sp_el0
        stp     x2, x3, [x0]
        stp     x4, xzr, [x0, #16]
@@ -116,7 +120,11 @@ ENTRY(cpu_do_resume)
        msr     mdscr_el1, x10
 
        msr     sctlr_el1, x12
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
        msr     tpidr_el1, x13
+alternative_else
+       msr     tpidr_el2, x13
+alternative_endif
        msr     sp_el0, x14
        /*
         * Restore oslsr_el1 by writing oslar_el1