arm64: kconfig: Ensure spinlock fastpaths are inlined if !PREEMPT
author: Will Deacon <will.deacon@arm.com>
Tue, 13 Mar 2018 21:17:01 +0000 (21:17 +0000)
committer: Will Deacon <will.deacon@arm.com>
Thu, 5 Jul 2018 09:05:06 +0000 (10:05 +0100)
When running with CONFIG_PREEMPT=n, the spinlock fastpaths fit inside
64 bytes, which typically coincides with the L1 I-cache line size.

Inline the spinlock fastpaths, like we do already for rwlocks.

Signed-off-by: Will Deacon <will.deacon@arm.com>
arch/arm64/Kconfig

index facd19625563a9d2c403ef1c8fa653ee69eff021..476de9b1d239a9b888ebd19215a04582f8e50280 100644 (file)
@@ -42,6 +42,16 @@ config ARM64
        select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
        select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
        select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
+       select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT
+       select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT
+       select ARCH_INLINE_SPIN_LOCK if !PREEMPT
+       select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT
+       select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT
+       select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT
+       select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT
+       select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT
+       select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT
+       select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT
        select ARCH_USE_CMPXCHG_LOCKREF
        select ARCH_USE_QUEUED_RWLOCKS
        select ARCH_USE_QUEUED_SPINLOCKS