select HAVE_DEBUG_STACKOVERFLOW
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HASH
+ select HAVE_ARCH_JUMP_LABEL
+ select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_REGS_AND_STACK_ACCESS_API
--- /dev/null
+++ b/arch/parisc/include/asm/jump_label.h
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_JUMP_LABEL_H
+#define _ASM_PARISC_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/stringify.h>
+#include <asm/assembly.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
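+/*
+ * Each __jump_table entry is three self-relative words
+ * (HAVE_ARCH_JUMP_LABEL_RELATIVE): the patch site, the branch target,
+ * and the static_key address. The "branch" polarity is folded into
+ * bit 0 of the key address via &((char *)key)[branch].
+ */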
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:\n\t"
+		"nop\n\t"
+		".pushsection __jump_table, \"aw\"\n\t"
+		".word 1b - ., %l[l_yes] - .\n\t"
+		__stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
+		".popsection\n\t"
+		: : "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
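+/*
+ * Same entry layout as above, but the code emitted inline is an
+ * unconditional "b,n" to l_yes; the static_branch_{likely,unlikely}()
+ * macros pick between this and the NOP variant depending on the key's
+ * default value.
+ */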
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:\n\t"
+		"b,n %l[l_yes]\n\t"
+		".pushsection __jump_table, \"aw\"\n\t"
+		".word 1b - ., %l[l_yes] - .\n\t"
+		__stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
+		".popsection\n\t"
+		: : "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif
obj-$(CONFIG_PARISC_CPU_TOPOLOGY) += topology.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KPROBES) += kprobes.o
--- /dev/null
+++ b/arch/parisc/kernel/jump_label.c
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Helge Deller <deller@gmx.de>
+ *
+ * Based on arch/arm64/kernel/jump_label.c
+ */
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+#include <linux/bug.h>
+#include <asm/alternative.h>
+#include <asm/patch.h>
+
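+/*
+ * The 17-bit branch displacement is scattered over several fields of
+ * the instruction word; reassemble_17() shuffles a plain signed word
+ * displacement into that layout (the same encoding helper used by
+ * arch/parisc/kernel/module.c).
+ */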
+static inline int reassemble_17(int as17)
+{
+	return (((as17 & 0x10000) >> 16) |
+		((as17 & 0x0f800) << 5) |
+		((as17 & 0x00400) >> 8) |
+		((as17 & 0x003ff) << 3));
+}
+
+void arch_jump_label_transform(struct jump_entry *entry,
+			       enum jump_label_type type)
+{
+	void *addr = (void *)jump_entry_code(entry);
+	u32 insn;
+
+	if (type == JUMP_LABEL_JMP) {
+		void *target = (void *)jump_entry_target(entry);
+		int distance = target - addr;
+		/*
+		 * Encode the PA1.1 "b,n" instruction with a 17-bit
+		 * displacement. In case we hit the BUG(), we could use
+		 * another branch instruction with a 22-bit displacement on
+		 * 64-bit CPUs instead. But this seems sufficient for now.
+		 */
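+		/* Branch targets are relative to the branch address + 8. */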
+		distance -= 8;
+		BUG_ON(distance > 262143 || distance < -262144);
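+		/*
+		 * 0xe8000002 is the "b,n" opcode template: BL with %r0 as
+		 * the link register and the nullify bit set; ">> 2" turns
+		 * the byte distance into a word displacement.
+		 */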
+		insn = 0xe8000002 | reassemble_17(distance >> 2);
+	} else {
+		insn = INSN_NOP;
+	}
+
+	patch_text(addr, insn);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+				      enum jump_label_type type)
+{
+	/*
+	 * We use the architected NOP in arch_static_branch, so there's no
+	 * need to patch an identical NOP over the top of it here. The core
+	 * will call arch_jump_label_transform from a module notifier if the
+	 * NOP needs to be replaced by a branch.
+	 */
+}
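+
+/*
+ * Usage sketch (generic static-key API, not introduced by this patch;
+ * my_key and do_rare_thing() are placeholders):
+ *
+ *	static DEFINE_STATIC_KEY_FALSE(my_key);
+ *
+ *	if (static_branch_unlikely(&my_key))
+ *		do_rare_thing();
+ *
+ * static_branch_enable(&my_key) patches the NOP emitted by
+ * arch_static_branch() into the "b,n" built above, and
+ * static_branch_disable() patches it back.
+ */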
*(.data..vm0.pgd) \
*(.data..vm0.pte)
+/*
+ * No __ro_after_init data in the .rodata section - it is always mapped
+ * read-only. The ro_after_init region (which holds the __jump_table)
+ * must stay writable during init so the core can sort the entries,
+ * hence the empty define.
+ */
+#define RO_AFTER_INIT_DATA
+
#include <asm-generic/vmlinux.lds.h>
/* needed for the processor specific cache alignment size */