x86/cpufeature: Replace the old static_cpu_has() with safe variant
author Borislav Petkov <bp@suse.de>
Tue, 26 Jan 2016 21:12:05 +0000 (22:12 +0100)
committer Ingo Molnar <mingo@kernel.org>
Sat, 30 Jan 2016 10:22:18 +0000 (11:22 +0100)
So the old one didn't work properly before alternatives had run.
And it was supposed to provide an optimized JMP because the
assumption was that the offset being jumped to would fit into a
signed byte, thus allowing a two-byte JMP encoding.
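
For reference, the two encodings in question (a sketch - the label
names are made up, but those are the actual opcodes of the short
vs. the near relative JMP):

        jmp     nearby_label            /* EB rel8  - 2 bytes */
        jmp     faraway_label           /* E9 rel32 - 5 bytes */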

So I did an x86_64 allyesconfig build and dumped all possible
sites where static_cpu_has() was used. The optimization amounted
to all of 12(!) places where static_cpu_has() had generated a
2-byte JMP, which saved us a whopping 36 bytes!
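
That is, at 3 bytes saved per site:

        12 sites * (5 - 2) bytes = 36 bytes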

This is clearly not worth the trouble, so we can remove the
optimization. The only place where it might count - __switch_to()
- we will handle differently. But that's not the subject of this
patch.
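
For context, the surviving (formerly "safe") variant is what makes
static_cpu_has() usable before alternatives have run: its initial
JMP targets a dynamic fallback until patching replaces it with a
static answer. Modeled in plain C (a sketch with hypothetical
variable names - the real implementation in cpufeature.h achieves
this via asm_volatile_goto and alternatives patching):

        static bool alternatives_patched;  /* hypothetical: set after patching */
        static bool patched_in_answer;     /* hypothetical: hardcoded result */

        static bool static_cpu_has_model(u16 bit)
        {
                if (!alternatives_patched)
                        return __static_cpu_has(bit);   /* i.e. boot_cpu_has() */
                return patched_in_answer;               /* static true/false */
        }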

Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1453842730-28463-6-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/Kconfig.debug
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/fpu/internal.h
arch/x86/kernel/apic/apic_numachip.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/vm86_32.c
drivers/cpufreq/intel_pstate.c
fs/btrfs/disk-io.c

diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 9b18ed97a8a2968b7b293bc83484d8311d830e83..68a2d1f0a6832bdbfafd9ed4122f370252f5e4d1 100644
@@ -350,16 +350,6 @@ config DEBUG_IMR_SELFTEST
 
          If unsure say N here.
 
-config X86_DEBUG_STATIC_CPU_HAS
-       bool "Debug alternatives"
-       depends on DEBUG_KERNEL
-       ---help---
-         This option causes additional code to be generated which
-         fails if static_cpu_has() is used before alternatives have
-         run.
-
-         If unsure, say N.
-
 config X86_DEBUG_FPU
        bool "Debug the x86 FPU code"
        depends on DEBUG_KERNEL
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 3cce9f3c5cb1d9dc1f8495479644e4742351393e..a261cf2e79075995f60077086b6cd03b5d569af1 100644
@@ -125,103 +125,19 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 #define cpu_has_osxsave                boot_cpu_has(X86_FEATURE_OSXSAVE)
 #define cpu_has_hypervisor     boot_cpu_has(X86_FEATURE_HYPERVISOR)
 /*
- * Do not add any more of those clumsy macros - use static_cpu_has_safe() for
+ * Do not add any more of those clumsy macros - use static_cpu_has() for
  * fast paths and boot_cpu_has() otherwise!
  */
 
 #if __GNUC__ >= 4 && defined(CONFIG_X86_FAST_FEATURE_TESTS)
-extern void warn_pre_alternatives(void);
-extern bool __static_cpu_has_safe(u16 bit);
+extern bool __static_cpu_has(u16 bit);
 
 /*
  * Static testing of CPU features.  Used the same as boot_cpu_has().
  * These are only valid after alternatives have run, but will statically
  * patch the target code for additional performance.
  */
-static __always_inline __pure bool __static_cpu_has(u16 bit)
-{
-#ifdef CC_HAVE_ASM_GOTO
-
-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
-
-               /*
-                * Catch too early usage of this before alternatives
-                * have run.
-                */
-               asm_volatile_goto("1: jmp %l[t_warn]\n"
-                        "2:\n"
-                        ".section .altinstructions,\"a\"\n"
-                        " .long 1b - .\n"
-                        " .long 0\n"           /* no replacement */
-                        " .word %P0\n"         /* 1: do replace */
-                        " .byte 2b - 1b\n"     /* source len */
-                        " .byte 0\n"           /* replacement len */
-                        " .byte 0\n"           /* pad len */
-                        ".previous\n"
-                        /* skipping size check since replacement size = 0 */
-                        : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
-
-#endif
-
-               asm_volatile_goto("1: jmp %l[t_no]\n"
-                        "2:\n"
-                        ".section .altinstructions,\"a\"\n"
-                        " .long 1b - .\n"
-                        " .long 0\n"           /* no replacement */
-                        " .word %P0\n"         /* feature bit */
-                        " .byte 2b - 1b\n"     /* source len */
-                        " .byte 0\n"           /* replacement len */
-                        " .byte 0\n"           /* pad len */
-                        ".previous\n"
-                        /* skipping size check since replacement size = 0 */
-                        : : "i" (bit) : : t_no);
-               return true;
-       t_no:
-               return false;
-
-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
-       t_warn:
-               warn_pre_alternatives();
-               return false;
-#endif
-
-#else /* CC_HAVE_ASM_GOTO */
-
-               u8 flag;
-               /* Open-coded due to __stringify() in ALTERNATIVE() */
-               asm volatile("1: movb $0,%0\n"
-                            "2:\n"
-                            ".section .altinstructions,\"a\"\n"
-                            " .long 1b - .\n"
-                            " .long 3f - .\n"
-                            " .word %P1\n"             /* feature bit */
-                            " .byte 2b - 1b\n"         /* source len */
-                            " .byte 4f - 3f\n"         /* replacement len */
-                            " .byte 0\n"               /* pad len */
-                            ".previous\n"
-                            ".section .discard,\"aw\",@progbits\n"
-                            " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
-                            ".previous\n"
-                            ".section .altinstr_replacement,\"ax\"\n"
-                            "3: movb $1,%0\n"
-                            "4:\n"
-                            ".previous\n"
-                            : "=qm" (flag) : "i" (bit));
-               return flag;
-
-#endif /* CC_HAVE_ASM_GOTO */
-}
-
-#define static_cpu_has(bit)                                    \
-(                                                              \
-       __builtin_constant_p(boot_cpu_has(bit)) ?               \
-               boot_cpu_has(bit) :                             \
-       __builtin_constant_p(bit) ?                             \
-               __static_cpu_has(bit) :                         \
-               boot_cpu_has(bit)                               \
-)
-
-static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+static __always_inline __pure bool _static_cpu_has(u16 bit)
 {
 #ifdef CC_HAVE_ASM_GOTO
                asm_volatile_goto("1: jmp %l[t_dynamic]\n"
@@ -255,7 +171,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
        t_no:
                return false;
        t_dynamic:
-               return __static_cpu_has_safe(bit);
+               return __static_cpu_has(bit);
 #else
                u8 flag;
                /* Open-coded due to __stringify() in ALTERNATIVE() */
@@ -293,22 +209,21 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
                             ".previous\n"
                             : "=qm" (flag)
                             : "i" (bit), "i" (X86_FEATURE_ALWAYS));
-               return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
+               return (flag == 2 ? __static_cpu_has(bit) : flag);
 #endif /* CC_HAVE_ASM_GOTO */
 }
 
-#define static_cpu_has_safe(bit)                               \
+#define static_cpu_has(bit)                                    \
 (                                                              \
        __builtin_constant_p(boot_cpu_has(bit)) ?               \
                boot_cpu_has(bit) :                             \
-               _static_cpu_has_safe(bit)                       \
+               _static_cpu_has(bit)                            \
 )
 #else
 /*
  * gcc 3.x is too stupid to do the static test; fall back to dynamic.
  */
 #define static_cpu_has(bit)            boot_cpu_has(bit)
-#define static_cpu_has_safe(bit)       boot_cpu_has(bit)
 #endif
 
 #define cpu_has_bug(c, bit)            cpu_has(c, (bit))
@@ -316,7 +231,6 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
 #define clear_cpu_bug(c, bit)          clear_cpu_cap(c, (bit))
 
 #define static_cpu_has_bug(bit)                static_cpu_has((bit))
-#define static_cpu_has_bug_safe(bit)   static_cpu_has_safe((bit))
 #define boot_cpu_has_bug(bit)          cpu_has_bug(&boot_cpu_data, (bit))
 
 #define MAX_CPU_FEATURES               (NCAPINTS * 32)
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index d01199def78160934c6647dea9de38a226ff951c..c2e46eb96b6d8449c66482005dcad47ccf88e7f4 100644
@@ -59,22 +59,22 @@ extern u64 fpu__get_supported_xfeatures_mask(void);
  */
 static __always_inline __pure bool use_eager_fpu(void)
 {
-       return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
+       return static_cpu_has(X86_FEATURE_EAGER_FPU);
 }
 
 static __always_inline __pure bool use_xsaveopt(void)
 {
-       return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
+       return static_cpu_has(X86_FEATURE_XSAVEOPT);
 }
 
 static __always_inline __pure bool use_xsave(void)
 {
-       return static_cpu_has_safe(X86_FEATURE_XSAVE);
+       return static_cpu_has(X86_FEATURE_XSAVE);
 }
 
 static __always_inline __pure bool use_fxsr(void)
 {
-       return static_cpu_has_safe(X86_FEATURE_FXSR);
+       return static_cpu_has(X86_FEATURE_FXSR);
 }
 
 /*
@@ -301,7 +301,7 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
 
        WARN_ON(system_state != SYSTEM_BOOTING);
 
-       if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+       if (static_cpu_has(X86_FEATURE_XSAVES))
                XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
@@ -323,7 +323,7 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
 
        WARN_ON(system_state != SYSTEM_BOOTING);
 
-       if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+       if (static_cpu_has(X86_FEATURE_XSAVES))
                XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
@@ -461,7 +461,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
         * pending. Clear the x87 state here by setting it to fixed values.
         * "m" is a random variable that should be in L1.
         */
-       if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
+       if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
                asm volatile(
                        "fnclex\n\t"
                        "emms\n\t"
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index c80c02c6ec4944a85715f4438443e5987df4f612..ab5c2c685a3ce908c3677262a35fd402ba77a445 100644
@@ -30,7 +30,7 @@ static unsigned int numachip1_get_apic_id(unsigned long x)
        unsigned long value;
        unsigned int id = (x >> 24) & 0xff;
 
-       if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+       if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
                rdmsrl(MSR_FAM10H_NODE_ID, value);
                id |= (value << 2) & 0xff00;
        }
@@ -178,7 +178,7 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
        this_cpu_write(cpu_llc_id, node);
 
        /* Account for nodes per socket in multi-core-module processors */
-       if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+       if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
                rdmsrl(MSR_FAM10H_NODE_ID, val);
                nodes = ((val >> 3) & 7) + 1;
        }
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 37830de8f60a8f0d8f8da27d409da72ce1f13551..ee499817f3f5310fbbe5a94d3133c8deea72fd1b 100644
@@ -1475,19 +1475,11 @@ void cpu_init(void)
 }
 #endif
 
-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
-void warn_pre_alternatives(void)
-{
-       WARN(1, "You're using static_cpu_has before alternatives have run!\n");
-}
-EXPORT_SYMBOL_GPL(warn_pre_alternatives);
-#endif
-
-inline bool __static_cpu_has_safe(u16 bit)
+inline bool __static_cpu_has(u16 bit)
 {
        return boot_cpu_has(bit);
 }
-EXPORT_SYMBOL_GPL(__static_cpu_has_safe);
+EXPORT_SYMBOL_GPL(__static_cpu_has);
 
 static void bsp_resume(void)
 {
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index e574b85465185fe273a839093452c26521ac3d1e..3dce1ca0a653091967f7089ce9e89c9d54399408 100644
@@ -362,7 +362,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
        /* make room for real-mode segments */
        tsk->thread.sp0 += 16;
 
-       if (static_cpu_has_safe(X86_FEATURE_SEP))
+       if (static_cpu_has(X86_FEATURE_SEP))
                tsk->thread.sysenter_cs = 0;
 
        load_sp0(tss, &tsk->thread);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cd83d477e32d412394da574e8e02adb6dd7be832..3a4b39afc0abb0cb9a7fc34845266176c95487a3 100644
@@ -1431,7 +1431,7 @@ static int __init intel_pstate_init(void)
        if (!all_cpu_data)
                return -ENOMEM;
 
-       if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
+       if (static_cpu_has(X86_FEATURE_HWP) && !no_hwp) {
                pr_info("intel_pstate: HWP enabled\n");
                hwp_active++;
        }
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index dd08e29f51177d27859c12f0d173224a24a1f4a9..d9286497924f398faf49853ab45ded4c88017e01 100644
@@ -930,7 +930,7 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags)
        if (bio_flags & EXTENT_BIO_TREE_LOG)
                return 0;
 #ifdef CONFIG_X86
-       if (static_cpu_has_safe(X86_FEATURE_XMM4_2))
+       if (static_cpu_has(X86_FEATURE_XMM4_2))
                return 0;
 #endif
        return 1;