LINUX KERNEL DUMP TEST MODULE (LKDTM)
M: Kees Cook <keescook@chromium.org>
S: Maintained
-F: drivers/misc/lkdtm*
+F: drivers/misc/lkdtm/*
LINUX SECURITY MODULE (LSM) FRAMEWORK
M: Chris Wright <chrisw@sous-sol.org>
obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
obj-$(CONFIG_DUMMY_IRQ) += dummy-irq.o
obj-$(CONFIG_ICS932S401) += ics932s401.o
-obj-$(CONFIG_LKDTM) += lkdtm.o
+obj-$(CONFIG_LKDTM) += lkdtm/
obj-$(CONFIG_TIFM_CORE) += tifm_core.o
obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
obj-$(CONFIG_PHANTOM) += phantom.o
obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
obj-$(CONFIG_OCXL) += ocxl/
obj-$(CONFIG_MISC_RTSX) += cardreader/
-
-lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
-lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o
-lkdtm-$(CONFIG_LKDTM) += lkdtm_heap.o
-lkdtm-$(CONFIG_LKDTM) += lkdtm_perms.o
-lkdtm-$(CONFIG_LKDTM) += lkdtm_refcount.o
-lkdtm-$(CONFIG_LKDTM) += lkdtm_rodata_objcopy.o
-lkdtm-$(CONFIG_LKDTM) += lkdtm_usercopy.o
-
-KCOV_INSTRUMENT_lkdtm_rodata.o := n
-
-OBJCOPYFLAGS :=
-OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \
- --set-section-flags .text=alloc,readonly \
- --rename-section .text=.rodata
-targets += lkdtm_rodata.o lkdtm_rodata_objcopy.o
-$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o FORCE
- $(call if_changed,objcopy)
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LKDTM_H
-#define __LKDTM_H
-
-#define pr_fmt(fmt) "lkdtm: " fmt
-
-#include <linux/kernel.h>
-
-/* lkdtm_bugs.c */
-void __init lkdtm_bugs_init(int *recur_param);
-void lkdtm_PANIC(void);
-void lkdtm_BUG(void);
-void lkdtm_WARNING(void);
-void lkdtm_EXCEPTION(void);
-void lkdtm_LOOP(void);
-void lkdtm_OVERFLOW(void);
-void lkdtm_CORRUPT_STACK(void);
-void lkdtm_CORRUPT_STACK_STRONG(void);
-void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
-void lkdtm_SOFTLOCKUP(void);
-void lkdtm_HARDLOCKUP(void);
-void lkdtm_SPINLOCKUP(void);
-void lkdtm_HUNG_TASK(void);
-void lkdtm_CORRUPT_LIST_ADD(void);
-void lkdtm_CORRUPT_LIST_DEL(void);
-void lkdtm_CORRUPT_USER_DS(void);
-void lkdtm_STACK_GUARD_PAGE_LEADING(void);
-void lkdtm_STACK_GUARD_PAGE_TRAILING(void);
-
-/* lkdtm_heap.c */
-void lkdtm_OVERWRITE_ALLOCATION(void);
-void lkdtm_WRITE_AFTER_FREE(void);
-void lkdtm_READ_AFTER_FREE(void);
-void lkdtm_WRITE_BUDDY_AFTER_FREE(void);
-void lkdtm_READ_BUDDY_AFTER_FREE(void);
-
-/* lkdtm_perms.c */
-void __init lkdtm_perms_init(void);
-void lkdtm_WRITE_RO(void);
-void lkdtm_WRITE_RO_AFTER_INIT(void);
-void lkdtm_WRITE_KERN(void);
-void lkdtm_EXEC_DATA(void);
-void lkdtm_EXEC_STACK(void);
-void lkdtm_EXEC_KMALLOC(void);
-void lkdtm_EXEC_VMALLOC(void);
-void lkdtm_EXEC_RODATA(void);
-void lkdtm_EXEC_USERSPACE(void);
-void lkdtm_ACCESS_USERSPACE(void);
-
-/* lkdtm_refcount.c */
-void lkdtm_REFCOUNT_INC_OVERFLOW(void);
-void lkdtm_REFCOUNT_ADD_OVERFLOW(void);
-void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void);
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void);
-void lkdtm_REFCOUNT_DEC_ZERO(void);
-void lkdtm_REFCOUNT_DEC_NEGATIVE(void);
-void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void);
-void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void);
-void lkdtm_REFCOUNT_INC_ZERO(void);
-void lkdtm_REFCOUNT_ADD_ZERO(void);
-void lkdtm_REFCOUNT_INC_SATURATED(void);
-void lkdtm_REFCOUNT_DEC_SATURATED(void);
-void lkdtm_REFCOUNT_ADD_SATURATED(void);
-void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void);
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void);
-void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void);
-void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void);
-void lkdtm_REFCOUNT_TIMING(void);
-void lkdtm_ATOMIC_TIMING(void);
-
-/* lkdtm_rodata.c */
-void lkdtm_rodata_do_nothing(void);
-
-/* lkdtm_usercopy.c */
-void __init lkdtm_usercopy_init(void);
-void __exit lkdtm_usercopy_exit(void);
-void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
-void lkdtm_USERCOPY_HEAP_SIZE_FROM(void);
-void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void);
-void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void);
-void lkdtm_USERCOPY_STACK_FRAME_TO(void);
-void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
-void lkdtm_USERCOPY_STACK_BEYOND(void);
-void lkdtm_USERCOPY_KERNEL(void);
-
-#endif
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_LKDTM) += lkdtm.o
+
+lkdtm-$(CONFIG_LKDTM) += core.o
+lkdtm-$(CONFIG_LKDTM) += bugs.o
+lkdtm-$(CONFIG_LKDTM) += heap.o
+lkdtm-$(CONFIG_LKDTM) += perms.o
+lkdtm-$(CONFIG_LKDTM) += refcount.o
+lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o
+lkdtm-$(CONFIG_LKDTM) += usercopy.o
+
+KCOV_INSTRUMENT_rodata.o := n
+
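+# rodata.o is compiled normally; objcopy then marks its .text section
+# read-only and renames it to .rodata, so the EXEC_RODATA test gets a
+# function that lives in a non-executable section.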
+OBJCOPYFLAGS :=
+OBJCOPYFLAGS_rodata_objcopy.o := \
+ --set-section-flags .text=alloc,readonly \
+ --rename-section .text=.rodata
+targets += rodata.o rodata_objcopy.o
+$(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE
+ $(call if_changed,objcopy)
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests related to logic bugs (e.g. bad dereferences,
+ * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
+ * lockups) along with other things that don't fit well into existing LKDTM
+ * test source files.
+ */
+#include "lkdtm.h"
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/task_stack.h>
+#include <linux/uaccess.h>
+
+struct lkdtm_list {
+ struct list_head node;
+};
+
+/*
+ * Make sure our attempts to overrun the kernel stack don't trigger
+ * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
+ * recurse past the end of THREAD_SIZE by default.
+ */
+#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
+#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
+#else
+#define REC_STACK_SIZE (THREAD_SIZE / 8)
+#endif
+#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
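+/*
+ * For example, with a 16kB THREAD_SIZE and CONFIG_FRAME_WARN=1024
+ * (illustrative values), REC_STACK_SIZE is 512 bytes and
+ * REC_NUM_DEFAULT is (16384 / 512) * 2 = 64 recursions.
+ */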
+
+static int recur_count = REC_NUM_DEFAULT;
+
+static DEFINE_SPINLOCK(lock_me_up);
+
+static int recursive_loop(int remaining)
+{
+ char buf[REC_STACK_SIZE];
+
+ /* Make sure compiler does not optimize this away. */
+ memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
+ if (!remaining)
+ return 0;
+ else
+ return recursive_loop(remaining - 1);
+}
+
+/* If the depth is negative, use the default, otherwise keep parameter. */
+void __init lkdtm_bugs_init(int *recur_param)
+{
+ if (*recur_param < 0)
+ *recur_param = recur_count;
+ else
+ recur_count = *recur_param;
+}
+
+void lkdtm_PANIC(void)
+{
+ panic("dumptest");
+}
+
+void lkdtm_BUG(void)
+{
+ BUG();
+}
+
+static int warn_counter;
+
+void lkdtm_WARNING(void)
+{
+ WARN(1, "Warning message trigger count: %d\n", warn_counter++);
+}
+
+void lkdtm_EXCEPTION(void)
+{
+ *((volatile int *) 0) = 0;
+}
+
+void lkdtm_LOOP(void)
+{
+ for (;;)
+ ;
+}
+
+void lkdtm_OVERFLOW(void)
+{
+ (void) recursive_loop(recur_count);
+}
+
+static noinline void __lkdtm_CORRUPT_STACK(void *stack)
+{
+ memset(stack, '\xff', 64);
+}
+
+/* This should trip the stack canary, not corrupt the return address. */
+noinline void lkdtm_CORRUPT_STACK(void)
+{
+ /* Use default char array length that triggers stack protection. */
+ char data[8] __aligned(sizeof(void *));
+
+ __lkdtm_CORRUPT_STACK(&data);
+
+ pr_info("Corrupted stack containing char array ...\n");
+}
+
+/* Same as above but will only get a canary with -fstack-protector-strong */
+noinline void lkdtm_CORRUPT_STACK_STRONG(void)
+{
+ union {
+ unsigned short shorts[4];
+ unsigned long *ptr;
+ } data __aligned(sizeof(void *));
+
+ __lkdtm_CORRUPT_STACK(&data);
+
+ pr_info("Corrupted stack containing union ...\n");
+}
+
+void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
+{
+ static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
+ u32 *p;
+ u32 val = 0x12345678;
+
+ p = (u32 *)(data + 1);
+ if (*p == 0)
+ val = 0x87654321;
+ *p = val;
+}
+
+void lkdtm_SOFTLOCKUP(void)
+{
+ preempt_disable();
+ for (;;)
+ cpu_relax();
+}
+
+void lkdtm_HARDLOCKUP(void)
+{
+ local_irq_disable();
+ for (;;)
+ cpu_relax();
+}
+
+void lkdtm_SPINLOCKUP(void)
+{
+ /* Must be called twice to trigger. */
+ spin_lock(&lock_me_up);
+ /* Let sparse know we intended to exit holding the lock. */
+ __release(&lock_me_up);
+}
+
+void lkdtm_HUNG_TASK(void)
+{
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+}
+
+void lkdtm_CORRUPT_LIST_ADD(void)
+{
+ /*
+ * Initially, an empty list via LIST_HEAD:
+ * test_head.next = &test_head
+ * test_head.prev = &test_head
+ */
+ LIST_HEAD(test_head);
+ struct lkdtm_list good, bad;
+ void *target[2] = { };
+ void *redirection = &target;
+
+ pr_info("attempting good list addition\n");
+
+ /*
+ * Adding to the list performs these actions:
+ * test_head.next->prev = &good.node
+ * good.node.next = test_head.next
+ * good.node.prev = &test_head
+ * test_head.next = &good.node
+ */
+ list_add(&good.node, &test_head);
+
+ pr_info("attempting corrupted list addition\n");
+ /*
+ * In simulating this "write what where" primitive, the "what" is
+ * the address of &bad.node, and the "where" is the address held
+ * by "redirection".
+ */
+ test_head.next = redirection;
+ list_add(&bad.node, &test_head);
+
+ if (target[0] == NULL && target[1] == NULL)
+ pr_err("Overwrite did not happen, but no BUG?!\n");
+ else
+ pr_err("list_add() corruption not detected!\n");
+}
+
+void lkdtm_CORRUPT_LIST_DEL(void)
+{
+ LIST_HEAD(test_head);
+ struct lkdtm_list item;
+ void *target[2] = { };
+ void *redirection = &target;
+
+ list_add(&item.node, &test_head);
+
+ pr_info("attempting good list removal\n");
+ list_del(&item.node);
+
+ pr_info("attempting corrupted list removal\n");
+ list_add(&item.node, &test_head);
+
+ /* As with the list_add() test above, this corrupts "next". */
+ item.node.next = redirection;
+ list_del(&item.node);
+
+ if (target[0] == NULL && target[1] == NULL)
+ pr_err("Overwrite did not happen, but no BUG?!\n");
+ else
+ pr_err("list_del() corruption not detected!\n");
+}
+
+/* Test if unbalanced set_fs(KERNEL_DS)/set_fs(USER_DS) check exists. */
+void lkdtm_CORRUPT_USER_DS(void)
+{
+ pr_info("setting bad task size limit\n");
+ set_fs(KERNEL_DS);
+
+ /* Make sure we do not keep running with a KERNEL_DS! */
+ force_sig(SIGKILL, current);
+}
+
+/* Test that VMAP_STACK is actually allocating with a leading guard page */
+void lkdtm_STACK_GUARD_PAGE_LEADING(void)
+{
+ const unsigned char *stack = task_stack_page(current);
+ const unsigned char *ptr = stack - 1;
+ volatile unsigned char byte;
+
+ pr_info("attempting bad read from page below current stack\n");
+
+ byte = *ptr;
+
+ pr_err("FAIL: accessed page before stack!\n");
+}
+
+/* Test that VMAP_STACK is actually allocating with a trailing guard page */
+void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
+{
+ const unsigned char *stack = task_stack_page(current);
+ const unsigned char *ptr = stack + THREAD_SIZE;
+ volatile unsigned char byte;
+
+ pr_info("attempting bad read from page above current stack\n");
+
+ byte = *ptr;
+
+ pr_err("FAIL: accessed page after stack!\n");
+}
--- /dev/null
+/*
+ * Linux Kernel Dump Test Module for testing kernel crash conditions:
+ * induces system failures at predefined crashpoints and under predefined
+ * operational conditions in order to evaluate the reliability of kernel
+ * sanity checking and crash dumps obtained using different dumping
+ * solutions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Author: Ankita Garg <ankita@in.ibm.com>
+ *
+ * It is adapted from the Linux Kernel Dump Test Tool by
+ * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
+ *
+ * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
+ *
+ * See Documentation/fault-injection/provoke-crashes.txt for instructions
+ */
+#include "lkdtm.h"
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/buffer_head.h>
+#include <linux/kprobes.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/debugfs.h>
+
+#ifdef CONFIG_IDE
+#include <linux/ide.h>
+#endif
+
+#define DEFAULT_COUNT 10
+
+static int lkdtm_debugfs_open(struct inode *inode, struct file *file);
+static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
+ size_t count, loff_t *off);
+static ssize_t direct_entry(struct file *f, const char __user *user_buf,
+ size_t count, loff_t *off);
+
+#ifdef CONFIG_KPROBES
+static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs);
+static ssize_t lkdtm_debugfs_entry(struct file *f,
+ const char __user *user_buf,
+ size_t count, loff_t *off);
+# define CRASHPOINT_KPROBE(_symbol) \
+ .kprobe = { \
+ .symbol_name = (_symbol), \
+ .pre_handler = lkdtm_kprobe_handler, \
+ },
+# define CRASHPOINT_WRITE(_symbol) \
+ (_symbol) ? lkdtm_debugfs_entry : direct_entry
+#else
+# define CRASHPOINT_KPROBE(_symbol)
+# define CRASHPOINT_WRITE(_symbol) direct_entry
+#endif
+
+/* Crash points */
+struct crashpoint {
+ const char *name;
+ const struct file_operations fops;
+ struct kprobe kprobe;
+};
+
+#define CRASHPOINT(_name, _symbol) \
+ { \
+ .name = _name, \
+ .fops = { \
+ .read = lkdtm_debugfs_read, \
+ .llseek = generic_file_llseek, \
+ .open = lkdtm_debugfs_open, \
+ .write = CRASHPOINT_WRITE(_symbol) \
+ }, \
+ CRASHPOINT_KPROBE(_symbol) \
+ }
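+/*
+ * For example, CRASHPOINT("FS_DEVRW", "ll_rw_block") arms a kprobe on
+ * ll_rw_block() (under CONFIG_KPROBES) whose pre-handler counts down
+ * and eventually fires the crashtype selected via debugfs.
+ */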
+
+/* Define the possible places where we can trigger a crash point. */
+static struct crashpoint crashpoints[] = {
+ CRASHPOINT("DIRECT", NULL),
+#ifdef CONFIG_KPROBES
+ CRASHPOINT("INT_HARDWARE_ENTRY", "do_IRQ"),
+ CRASHPOINT("INT_HW_IRQ_EN", "handle_irq_event"),
+ CRASHPOINT("INT_TASKLET_ENTRY", "tasklet_action"),
+ CRASHPOINT("FS_DEVRW", "ll_rw_block"),
+ CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"),
+ CRASHPOINT("TIMERADD", "hrtimer_start"),
+ CRASHPOINT("SCSI_DISPATCH_CMD", "scsi_dispatch_cmd"),
+# ifdef CONFIG_IDE
+ CRASHPOINT("IDE_CORE_CP", "generic_ide_ioctl"),
+# endif
+#endif
+};
+
+
+/* Crash types. */
+struct crashtype {
+ const char *name;
+ void (*func)(void);
+};
+
+#define CRASHTYPE(_name) \
+ { \
+ .name = __stringify(_name), \
+ .func = lkdtm_ ## _name, \
+ }
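+/*
+ * For example, CRASHTYPE(PANIC) expands to
+ * { .name = "PANIC", .func = lkdtm_PANIC }.
+ */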
+
+/* Define the possible types of crashes that can be triggered. */
+static const struct crashtype crashtypes[] = {
+ CRASHTYPE(PANIC),
+ CRASHTYPE(BUG),
+ CRASHTYPE(WARNING),
+ CRASHTYPE(EXCEPTION),
+ CRASHTYPE(LOOP),
+ CRASHTYPE(OVERFLOW),
+ CRASHTYPE(CORRUPT_LIST_ADD),
+ CRASHTYPE(CORRUPT_LIST_DEL),
+ CRASHTYPE(CORRUPT_USER_DS),
+ CRASHTYPE(CORRUPT_STACK),
+ CRASHTYPE(CORRUPT_STACK_STRONG),
+ CRASHTYPE(STACK_GUARD_PAGE_LEADING),
+ CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
+ CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
+ CRASHTYPE(OVERWRITE_ALLOCATION),
+ CRASHTYPE(WRITE_AFTER_FREE),
+ CRASHTYPE(READ_AFTER_FREE),
+ CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
+ CRASHTYPE(READ_BUDDY_AFTER_FREE),
+ CRASHTYPE(SOFTLOCKUP),
+ CRASHTYPE(HARDLOCKUP),
+ CRASHTYPE(SPINLOCKUP),
+ CRASHTYPE(HUNG_TASK),
+ CRASHTYPE(EXEC_DATA),
+ CRASHTYPE(EXEC_STACK),
+ CRASHTYPE(EXEC_KMALLOC),
+ CRASHTYPE(EXEC_VMALLOC),
+ CRASHTYPE(EXEC_RODATA),
+ CRASHTYPE(EXEC_USERSPACE),
+ CRASHTYPE(ACCESS_USERSPACE),
+ CRASHTYPE(WRITE_RO),
+ CRASHTYPE(WRITE_RO_AFTER_INIT),
+ CRASHTYPE(WRITE_KERN),
+ CRASHTYPE(REFCOUNT_INC_OVERFLOW),
+ CRASHTYPE(REFCOUNT_ADD_OVERFLOW),
+ CRASHTYPE(REFCOUNT_INC_NOT_ZERO_OVERFLOW),
+ CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_OVERFLOW),
+ CRASHTYPE(REFCOUNT_DEC_ZERO),
+ CRASHTYPE(REFCOUNT_DEC_NEGATIVE),
+ CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE),
+ CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE),
+ CRASHTYPE(REFCOUNT_INC_ZERO),
+ CRASHTYPE(REFCOUNT_ADD_ZERO),
+ CRASHTYPE(REFCOUNT_INC_SATURATED),
+ CRASHTYPE(REFCOUNT_DEC_SATURATED),
+ CRASHTYPE(REFCOUNT_ADD_SATURATED),
+ CRASHTYPE(REFCOUNT_INC_NOT_ZERO_SATURATED),
+ CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_SATURATED),
+ CRASHTYPE(REFCOUNT_DEC_AND_TEST_SATURATED),
+ CRASHTYPE(REFCOUNT_SUB_AND_TEST_SATURATED),
+ CRASHTYPE(REFCOUNT_TIMING),
+ CRASHTYPE(ATOMIC_TIMING),
+ CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
+ CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
+ CRASHTYPE(USERCOPY_HEAP_WHITELIST_TO),
+ CRASHTYPE(USERCOPY_HEAP_WHITELIST_FROM),
+ CRASHTYPE(USERCOPY_STACK_FRAME_TO),
+ CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
+ CRASHTYPE(USERCOPY_STACK_BEYOND),
+ CRASHTYPE(USERCOPY_KERNEL),
+};
+
+
+/* Global kprobe entry and crashtype. */
+static struct kprobe *lkdtm_kprobe;
+static struct crashpoint *lkdtm_crashpoint;
+static const struct crashtype *lkdtm_crashtype;
+
+/* Module parameters */
+static int recur_count = -1;
+module_param(recur_count, int, 0644);
+MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
+
+static char *cpoint_name;
+module_param(cpoint_name, charp, 0444);
+MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
+
+static char *cpoint_type;
+module_param(cpoint_type, charp, 0444);
+MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
+ "hitting the crash point");
+
+static int cpoint_count = DEFAULT_COUNT;
+module_param(cpoint_count, int, 0644);
+MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
+ "crash point is to be hit to trigger action");
+
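+/*
+ * Typical module-parameter usage (illustrative values; see
+ * Documentation/fault-injection/provoke-crashes.txt):
+ *   insmod lkdtm.ko cpoint_name=INT_HARDWARE_ENTRY cpoint_type=BUG cpoint_count=10
+ */
+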
+
+/* Return the crashtype entry or NULL if the name is invalid */
+static const struct crashtype *find_crashtype(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
+ if (!strcmp(name, crashtypes[i].name))
+ return &crashtypes[i];
+ }
+
+ return NULL;
+}
+
+/*
+ * This is forced noinline just so it distinctly shows up in the stackdump
+ * which makes validation of expected lkdtm crashes easier.
+ */
+static noinline void lkdtm_do_action(const struct crashtype *crashtype)
+{
+ if (WARN_ON(!crashtype || !crashtype->func))
+ return;
+ crashtype->func();
+}
+
+static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
+ const struct crashtype *crashtype)
+{
+ int ret;
+
+ /* If this doesn't have a symbol, just call immediately. */
+ if (!crashpoint->kprobe.symbol_name) {
+ lkdtm_do_action(crashtype);
+ return 0;
+ }
+
+ if (lkdtm_kprobe != NULL)
+ unregister_kprobe(lkdtm_kprobe);
+
+ lkdtm_crashpoint = crashpoint;
+ lkdtm_crashtype = crashtype;
+ lkdtm_kprobe = &crashpoint->kprobe;
+ ret = register_kprobe(lkdtm_kprobe);
+ if (ret < 0) {
+ pr_info("Couldn't register kprobe %s\n",
+ crashpoint->kprobe.symbol_name);
+ lkdtm_kprobe = NULL;
+ lkdtm_crashpoint = NULL;
+ lkdtm_crashtype = NULL;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_KPROBES
+/* Global crash counter and spinlock. */
+static int crash_count = DEFAULT_COUNT;
+static DEFINE_SPINLOCK(crash_count_lock);
+
+/* Called by kprobe entry points. */
+static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
+{
+ unsigned long flags;
+ bool do_it = false;
+
+ if (WARN_ON(!lkdtm_crashpoint || !lkdtm_crashtype))
+ return 0;
+
+ spin_lock_irqsave(&crash_count_lock, flags);
+ crash_count--;
+ pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
+ lkdtm_crashpoint->name, lkdtm_crashtype->name, crash_count);
+
+ if (crash_count == 0) {
+ do_it = true;
+ crash_count = cpoint_count;
+ }
+ spin_unlock_irqrestore(&crash_count_lock, flags);
+
+ if (do_it)
+ lkdtm_do_action(lkdtm_crashtype);
+
+ return 0;
+}
+
+static ssize_t lkdtm_debugfs_entry(struct file *f,
+ const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct crashpoint *crashpoint = file_inode(f)->i_private;
+ const struct crashtype *crashtype = NULL;
+ char *buf;
+ int err;
+
+ if (count >= PAGE_SIZE)
+ return -EINVAL;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (copy_from_user(buf, user_buf, count)) {
+ free_page((unsigned long) buf);
+ return -EFAULT;
+ }
+ /* NUL-terminate and strip the trailing newline */
+ buf[count] = '\0';
+ strim(buf);
+
+ crashtype = find_crashtype(buf);
+ free_page((unsigned long)buf);
+
+ if (!crashtype)
+ return -EINVAL;
+
+ err = lkdtm_register_cpoint(crashpoint, crashtype);
+ if (err < 0)
+ return err;
+
+ *off += count;
+
+ return count;
+}
+#endif
+
+/* Generic read callback that just prints out the available crash types */
+static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ char *buf;
+ int i, n, out;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
+ for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
+ n += snprintf(buf + n, PAGE_SIZE - n, "%s\n",
+ crashtypes[i].name);
+ }
+ buf[n] = '\0';
+
+ out = simple_read_from_buffer(user_buf, count, off,
+ buf, n);
+ free_page((unsigned long) buf);
+
+ return out;
+}
+
+static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+/* Special entry to just crash directly. Available without KPROBEs */
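+/*
+ * For example, with debugfs mounted at /sys/kernel/debug:
+ *   echo EXCEPTION > /sys/kernel/debug/provoke-crash/DIRECT
+ */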
+static ssize_t direct_entry(struct file *f, const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ const struct crashtype *crashtype;
+ char *buf;
+
+ if (count >= PAGE_SIZE)
+ return -EINVAL;
+ if (count < 1)
+ return -EINVAL;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (copy_from_user(buf, user_buf, count)) {
+ free_page((unsigned long) buf);
+ return -EFAULT;
+ }
+ /* NUL-terminate and strip the trailing newline */
+ buf[count] = '\0';
+ strim(buf);
+
+ crashtype = find_crashtype(buf);
+ free_page((unsigned long) buf);
+ if (!crashtype)
+ return -EINVAL;
+
+ pr_info("Performing direct entry %s\n", crashtype->name);
+ lkdtm_do_action(crashtype);
+ *off += count;
+
+ return count;
+}
+
+static struct dentry *lkdtm_debugfs_root;
+
+static int __init lkdtm_module_init(void)
+{
+ struct crashpoint *crashpoint = NULL;
+ const struct crashtype *crashtype = NULL;
+ int ret = -EINVAL;
+ int i;
+
+ /* Neither or both of these need to be set */
+ if ((cpoint_type || cpoint_name) && !(cpoint_type && cpoint_name)) {
+ pr_err("Need both cpoint_type and cpoint_name or neither\n");
+ return -EINVAL;
+ }
+
+ if (cpoint_type) {
+ crashtype = find_crashtype(cpoint_type);
+ if (!crashtype) {
+ pr_err("Unknown crashtype '%s'\n", cpoint_type);
+ return -EINVAL;
+ }
+ }
+
+ if (cpoint_name) {
+ for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
+ if (!strcmp(cpoint_name, crashpoints[i].name))
+ crashpoint = &crashpoints[i];
+ }
+
+ /* Refuse unknown crashpoints. */
+ if (!crashpoint) {
+ pr_err("Invalid crashpoint %s\n", cpoint_name);
+ return -EINVAL;
+ }
+ }
+
+#ifdef CONFIG_KPROBES
+ /* Set crash count. */
+ crash_count = cpoint_count;
+#endif
+
+ /* Handle test-specific initialization. */
+ lkdtm_bugs_init(&recur_count);
+ lkdtm_perms_init();
+ lkdtm_usercopy_init();
+
+ /* Register debugfs interface */
+ lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
+ if (!lkdtm_debugfs_root) {
+ pr_err("creating root dir failed\n");
+ return -ENODEV;
+ }
+
+ /* Install debugfs trigger files. */
+ for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
+ struct crashpoint *cur = &crashpoints[i];
+ struct dentry *de;
+
+ de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
+ cur, &cur->fops);
+ if (de == NULL) {
+ pr_err("could not create crashpoint %s\n", cur->name);
+ goto out_err;
+ }
+ }
+
+ /* Install crashpoint if one was selected. */
+ if (crashpoint) {
+ ret = lkdtm_register_cpoint(crashpoint, crashtype);
+ if (ret < 0) {
+ pr_info("Invalid crashpoint %s\n", crashpoint->name);
+ goto out_err;
+ }
+ pr_info("Crash point %s of type %s registered\n",
+ crashpoint->name, cpoint_type);
+ } else {
+ pr_info("No crash points registered, enable through debugfs\n");
+ }
+
+ return 0;
+
+out_err:
+ debugfs_remove_recursive(lkdtm_debugfs_root);
+ return ret;
+}
+
+static void __exit lkdtm_module_exit(void)
+{
+ debugfs_remove_recursive(lkdtm_debugfs_root);
+
+ /* Handle test-specific clean-up. */
+ lkdtm_usercopy_exit();
+
+ if (lkdtm_kprobe != NULL)
+ unregister_kprobe(lkdtm_kprobe);
+
+ pr_info("Crash point unregistered\n");
+}
+
+module_init(lkdtm_module_init);
+module_exit(lkdtm_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Kernel crash testing module");
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests relating directly to heap memory, including
+ * page allocation and slab allocations.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+/*
+ * This tries to stay within the next largest power-of-2 kmalloc cache
+ * to avoid actually overwriting anything important if it's not detected
+ * correctly.
+ */
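+/*
+ * (With len = 1020, the allocation comes from the 1024-byte kmalloc
+ * cache, so the u32 store at byte offset 1024 lands just past the end
+ * of the object, where slab debugging such as redzoning should catch it.)
+ */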
+void lkdtm_OVERWRITE_ALLOCATION(void)
+{
+ size_t len = 1020;
+ u32 *data = kmalloc(len, GFP_KERNEL);
+ if (!data)
+ return;
+
+ data[1024 / sizeof(u32)] = 0x12345678;
+ kfree(data);
+}
+
+void lkdtm_WRITE_AFTER_FREE(void)
+{
+ int *base, *again;
+ size_t len = 1024;
+ /*
+ * The slub allocator uses the first word to store the free
+ * pointer in some configurations. Use the middle of the
+ * allocation to avoid running into the freelist
+ */
+ size_t offset = (len / sizeof(*base)) / 2;
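+ /* (For len = 1024, offset is 128 ints, i.e. byte offset 512.) */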
+
+ base = kmalloc(len, GFP_KERNEL);
+ if (!base)
+ return;
+ pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
+ pr_info("Attempting bad write to freed memory at %p\n",
+ &base[offset]);
+ kfree(base);
+ base[offset] = 0x0abcdef0;
+ /* Attempt to notice the overwrite. */
+ again = kmalloc(len, GFP_KERNEL);
+ kfree(again);
+ if (again != base)
+ pr_info("Hmm, didn't get the same memory range.\n");
+}
+
+void lkdtm_READ_AFTER_FREE(void)
+{
+ int *base, *val, saw;
+ size_t len = 1024;
+ /*
+ * The slub allocator uses the first word to store the free
+ * pointer in some configurations. Use the middle of the
+ * allocation to avoid running into the freelist
+ */
+ size_t offset = (len / sizeof(*base)) / 2;
+
+ base = kmalloc(len, GFP_KERNEL);
+ if (!base) {
+ pr_info("Unable to allocate base memory.\n");
+ return;
+ }
+
+ val = kmalloc(len, GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate val memory.\n");
+ kfree(base);
+ return;
+ }
+
+ *val = 0x12345678;
+ base[offset] = *val;
+ pr_info("Value in memory before free: %x\n", base[offset]);
+
+ kfree(base);
+
+ pr_info("Attempting bad read from freed memory\n");
+ saw = base[offset];
+ if (saw != *val) {
+ /* Good! Poisoning happened, so declare a win. */
+ pr_info("Memory correctly poisoned (%x)\n", saw);
+ BUG();
+ }
+ pr_info("Memory was not poisoned\n");
+
+ kfree(val);
+}
+
+void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
+{
+ unsigned long p = __get_free_page(GFP_KERNEL);
+ if (!p) {
+ pr_info("Unable to allocate free page\n");
+ return;
+ }
+
+ pr_info("Writing to the buddy page before free\n");
+ memset((void *)p, 0x3, PAGE_SIZE);
+ free_page(p);
+ schedule();
+ pr_info("Attempting bad write to the buddy page after free\n");
+ memset((void *)p, 0x78, PAGE_SIZE);
+ /* Attempt to notice the overwrite. */
+ p = __get_free_page(GFP_KERNEL);
+ free_page(p);
+ schedule();
+}
+
+void lkdtm_READ_BUDDY_AFTER_FREE(void)
+{
+ unsigned long p = __get_free_page(GFP_KERNEL);
+ int saw, *val;
+ int *base;
+
+ if (!p) {
+ pr_info("Unable to allocate free page\n");
+ return;
+ }
+
+ val = kmalloc(1024, GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate val memory.\n");
+ free_page(p);
+ return;
+ }
+
+ base = (int *)p;
+
+ *val = 0x12345678;
+ base[0] = *val;
+ pr_info("Value in memory before free: %x\n", base[0]);
+ free_page(p);
+ pr_info("Attempting to read from freed memory\n");
+ saw = base[0];
+ if (saw != *val) {
+ /* Good! Poisoning happened, so declare a win. */
+ pr_info("Memory correctly poisoned (%x)\n", saw);
+ BUG();
+ }
+ pr_info("Buddy page was not poisoned\n");
+
+ kfree(val);
+}
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LKDTM_H
+#define __LKDTM_H
+
+#define pr_fmt(fmt) "lkdtm: " fmt
+
+#include <linux/kernel.h>
+
+/* lkdtm_bugs.c */
+void __init lkdtm_bugs_init(int *recur_param);
+void lkdtm_PANIC(void);
+void lkdtm_BUG(void);
+void lkdtm_WARNING(void);
+void lkdtm_EXCEPTION(void);
+void lkdtm_LOOP(void);
+void lkdtm_OVERFLOW(void);
+void lkdtm_CORRUPT_STACK(void);
+void lkdtm_CORRUPT_STACK_STRONG(void);
+void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
+void lkdtm_SOFTLOCKUP(void);
+void lkdtm_HARDLOCKUP(void);
+void lkdtm_SPINLOCKUP(void);
+void lkdtm_HUNG_TASK(void);
+void lkdtm_CORRUPT_LIST_ADD(void);
+void lkdtm_CORRUPT_LIST_DEL(void);
+void lkdtm_CORRUPT_USER_DS(void);
+void lkdtm_STACK_GUARD_PAGE_LEADING(void);
+void lkdtm_STACK_GUARD_PAGE_TRAILING(void);
+
+/* lkdtm_heap.c */
+void lkdtm_OVERWRITE_ALLOCATION(void);
+void lkdtm_WRITE_AFTER_FREE(void);
+void lkdtm_READ_AFTER_FREE(void);
+void lkdtm_WRITE_BUDDY_AFTER_FREE(void);
+void lkdtm_READ_BUDDY_AFTER_FREE(void);
+
+/* lkdtm_perms.c */
+void __init lkdtm_perms_init(void);
+void lkdtm_WRITE_RO(void);
+void lkdtm_WRITE_RO_AFTER_INIT(void);
+void lkdtm_WRITE_KERN(void);
+void lkdtm_EXEC_DATA(void);
+void lkdtm_EXEC_STACK(void);
+void lkdtm_EXEC_KMALLOC(void);
+void lkdtm_EXEC_VMALLOC(void);
+void lkdtm_EXEC_RODATA(void);
+void lkdtm_EXEC_USERSPACE(void);
+void lkdtm_ACCESS_USERSPACE(void);
+
+/* lkdtm_refcount.c */
+void lkdtm_REFCOUNT_INC_OVERFLOW(void);
+void lkdtm_REFCOUNT_ADD_OVERFLOW(void);
+void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void);
+void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void);
+void lkdtm_REFCOUNT_DEC_ZERO(void);
+void lkdtm_REFCOUNT_DEC_NEGATIVE(void);
+void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void);
+void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void);
+void lkdtm_REFCOUNT_INC_ZERO(void);
+void lkdtm_REFCOUNT_ADD_ZERO(void);
+void lkdtm_REFCOUNT_INC_SATURATED(void);
+void lkdtm_REFCOUNT_DEC_SATURATED(void);
+void lkdtm_REFCOUNT_ADD_SATURATED(void);
+void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void);
+void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void);
+void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void);
+void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void);
+void lkdtm_REFCOUNT_TIMING(void);
+void lkdtm_ATOMIC_TIMING(void);
+
+/* lkdtm_rodata.c */
+void lkdtm_rodata_do_nothing(void);
+
+/* lkdtm_usercopy.c */
+void __init lkdtm_usercopy_init(void);
+void __exit lkdtm_usercopy_exit(void);
+void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
+void lkdtm_USERCOPY_HEAP_SIZE_FROM(void);
+void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void);
+void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void);
+void lkdtm_USERCOPY_STACK_FRAME_TO(void);
+void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
+void lkdtm_USERCOPY_STACK_BEYOND(void);
+void lkdtm_USERCOPY_KERNEL(void);
+
+#endif
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests related to validating kernel memory
+ * permissions: non-executable regions, non-writable regions, and
+ * even non-readable regions.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+/* Whether or not to fill the target memory area with do_nothing(). */
+#define CODE_WRITE true
+#define CODE_AS_IS false
+
+/* How many bytes to copy to be sure we've copied enough of do_nothing(). */
+#define EXEC_SIZE 64
+
+/* This is non-const, so it will end up in the .data section. */
+static u8 data_area[EXEC_SIZE];
+
+/* This is const, so it will end up in the .rodata section. */
+static const unsigned long rodata = 0xAA55AA55;
+
+/* This is marked __ro_after_init, so it should ultimately be .rodata. */
+static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
+
+/*
+ * This just returns to the caller. It is designed to be copied into
+ * non-executable memory regions.
+ */
+static void do_nothing(void)
+{
+ return;
+}
+
+/* Must immediately follow do_nothing for size calculations to work out. */
+static void do_overwritten(void)
+{
+ pr_info("do_overwritten wasn't overwritten!\n");
+ return;
+}
+
+static noinline void execute_location(void *dst, bool write)
+{
+ void (*func)(void) = dst;
+
+ pr_info("attempting ok execution at %p\n", do_nothing);
+ do_nothing();
+
+ if (write == CODE_WRITE) {
+ memcpy(dst, do_nothing, EXEC_SIZE);
+ flush_icache_range((unsigned long)dst,
+ (unsigned long)dst + EXEC_SIZE);
+ }
+ pr_info("attempting bad execution at %p\n", func);
+ func();
+}
+
+static void execute_user_location(void *dst)
+{
+ int copied;
+
+ /* Intentionally crossing kernel/user memory boundary. */
+ void (*func)(void) = dst;
+
+ pr_info("attempting ok execution at %p\n", do_nothing);
+ do_nothing();
+
+ copied = access_process_vm(current, (unsigned long)dst, do_nothing,
+ EXEC_SIZE, FOLL_WRITE);
+ if (copied < EXEC_SIZE)
+ return;
+ pr_info("attempting bad execution at %p\n", func);
+ func();
+}
+
+void lkdtm_WRITE_RO(void)
+{
+ /* Explicitly cast away "const" for the test. */
+ unsigned long *ptr = (unsigned long *)&rodata;
+
+ pr_info("attempting bad rodata write at %p\n", ptr);
+ *ptr ^= 0xabcd1234;
+}
+
+void lkdtm_WRITE_RO_AFTER_INIT(void)
+{
+ unsigned long *ptr = &ro_after_init;
+
+ /*
+	 * Verify this was written to during init. Since an Oops
+	 * is considered a "success", failing this check just skips
+	 * the real test.
+ */
+ if ((*ptr & 0xAA) != 0xAA) {
+ pr_info("%p was NOT written during init!?\n", ptr);
+ return;
+ }
+
+ pr_info("attempting bad ro_after_init write at %p\n", ptr);
+ *ptr ^= 0xabcd1234;
+}
+
+void lkdtm_WRITE_KERN(void)
+{
+ size_t size;
+ unsigned char *ptr;
+
+ size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
+ ptr = (unsigned char *)do_overwritten;
+
+ pr_info("attempting bad %zu byte write at %p\n", size, ptr);
+ memcpy(ptr, (unsigned char *)do_nothing, size);
+ flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
+
+ do_overwritten();
+}
+
+void lkdtm_EXEC_DATA(void)
+{
+ execute_location(data_area, CODE_WRITE);
+}
+
+void lkdtm_EXEC_STACK(void)
+{
+ u8 stack_area[EXEC_SIZE];
+ execute_location(stack_area, CODE_WRITE);
+}
+
+void lkdtm_EXEC_KMALLOC(void)
+{
+ u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
+ execute_location(kmalloc_area, CODE_WRITE);
+ kfree(kmalloc_area);
+}
+
+void lkdtm_EXEC_VMALLOC(void)
+{
+ u32 *vmalloc_area = vmalloc(EXEC_SIZE);
+ execute_location(vmalloc_area, CODE_WRITE);
+ vfree(vmalloc_area);
+}
+
+void lkdtm_EXEC_RODATA(void)
+{
+ execute_location(lkdtm_rodata_do_nothing, CODE_AS_IS);
+}
+
+void lkdtm_EXEC_USERSPACE(void)
+{
+ unsigned long user_addr;
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+ execute_user_location((void *)user_addr);
+ vm_munmap(user_addr, PAGE_SIZE);
+}
+
+void lkdtm_ACCESS_USERSPACE(void)
+{
+ unsigned long user_addr, tmp = 0;
+ unsigned long *ptr;
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
+ pr_warn("copy_to_user failed\n");
+ vm_munmap(user_addr, PAGE_SIZE);
+ return;
+ }
+
+ ptr = (unsigned long *)user_addr;
+
+ pr_info("attempting bad read at %p\n", ptr);
+ tmp = *ptr;
+ tmp += 0xc0dec0de;
+
+ pr_info("attempting bad write at %p\n", ptr);
+ *ptr = tmp;
+
+ vm_munmap(user_addr, PAGE_SIZE);
+}
+
+void __init lkdtm_perms_init(void)
+{
+ /* Make sure we can write to __ro_after_init values during __init */
+ ro_after_init |= 0xAA;
+
+}
--- /dev/null
+/*
+ * This is for all the tests related to refcount bugs (e.g. overflow,
+ * underflow, reaching zero untested, etc.).
+ */
+#include "lkdtm.h"
+#include <linux/refcount.h>
+
+#ifdef CONFIG_REFCOUNT_FULL
+#define REFCOUNT_MAX (UINT_MAX - 1)
+#define REFCOUNT_SATURATED UINT_MAX
+#else
+#define REFCOUNT_MAX INT_MAX
+#define REFCOUNT_SATURATED (INT_MIN / 2)
+#endif
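+
+/*
+ * With CONFIG_REFCOUNT_FULL, a misbehaving refcount saturates at
+ * UINT_MAX; the arch fast path instead pins it to INT_MIN / 2. The
+ * check helpers below compare against whichever pair is in effect.
+ */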
+
+static void overflow_check(refcount_t *ref)
+{
+ switch (refcount_read(ref)) {
+ case REFCOUNT_SATURATED:
+ pr_info("Overflow detected: saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Overflow detected: unsafely reset to max\n");
+ break;
+ default:
+ pr_err("Fail: refcount wrapped to %d\n", refcount_read(ref));
+ }
+}
+
+/*
+ * A refcount_inc() above the maximum value of the refcount implementation,
+ * should at least saturate, and at most also WARN.
+ */
+void lkdtm_REFCOUNT_INC_OVERFLOW(void)
+{
+ refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
+
+ pr_info("attempting good refcount_inc() without overflow\n");
+ refcount_dec(&over);
+ refcount_inc(&over);
+
+ pr_info("attempting bad refcount_inc() overflow\n");
+ refcount_inc(&over);
+ refcount_inc(&over);
+
+ overflow_check(&over);
+}
+
+/* refcount_add() should behave just like refcount_inc() above. */
+void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
+{
+ refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
+
+ pr_info("attempting good refcount_add() without overflow\n");
+ refcount_dec(&over);
+ refcount_dec(&over);
+ refcount_dec(&over);
+ refcount_dec(&over);
+ refcount_add(4, &over);
+
+ pr_info("attempting bad refcount_add() overflow\n");
+ refcount_add(4, &over);
+
+ overflow_check(&over);
+}
+
+/* refcount_inc_not_zero() should behave just like refcount_inc() above. */
+void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
+{
+ refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
+
+ pr_info("attempting bad refcount_inc_not_zero() overflow\n");
+ if (!refcount_inc_not_zero(&over))
+ pr_warn("Weird: refcount_inc_not_zero() reported zero\n");
+
+ overflow_check(&over);
+}
+
+/* refcount_add_not_zero() should behave just like refcount_inc() above. */
+void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void)
+{
+ refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
+
+ pr_info("attempting bad refcount_add_not_zero() overflow\n");
+ if (!refcount_add_not_zero(6, &over))
+ pr_warn("Weird: refcount_add_not_zero() reported zero\n");
+
+ overflow_check(&over);
+}
+
+static void check_zero(refcount_t *ref)
+{
+ switch (refcount_read(ref)) {
+ case REFCOUNT_SATURATED:
+ pr_info("Zero detected: saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Zero detected: unsafely reset to max\n");
+ break;
+ case 0:
+ pr_warn("Still at zero: refcount_inc/add() must not inc-from-0\n");
+ break;
+ default:
+ pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
+ }
+}
+
+/*
+ * A refcount_dec(), as opposed to a refcount_dec_and_test(), that hits
+ * zero should either saturate (when inc-from-zero isn't protected) or
+ * stay at zero (when inc-from-zero is protected), and should WARN in both cases.
+ */
+void lkdtm_REFCOUNT_DEC_ZERO(void)
+{
+ refcount_t zero = REFCOUNT_INIT(2);
+
+ pr_info("attempting good refcount_dec()\n");
+ refcount_dec(&zero);
+
+ pr_info("attempting bad refcount_dec() to zero\n");
+ refcount_dec(&zero);
+
+ check_zero(&zero);
+}
+
+static void check_negative(refcount_t *ref, int start)
+{
+ /*
+ * CONFIG_REFCOUNT_FULL refuses to move a refcount at all on an
+ * over-sub, so we have to track our starting position instead of
+ * looking only at zero-pinning.
+ */
+ if (refcount_read(ref) == start) {
+ pr_warn("Still at %d: refcount_inc/add() must not inc-from-0\n",
+ start);
+ return;
+ }
+
+ switch (refcount_read(ref)) {
+ case REFCOUNT_SATURATED:
+ pr_info("Negative detected: saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Negative detected: unsafely reset to max\n");
+ break;
+ default:
+ pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
+ }
+}
+
+/* A refcount_dec() going negative should saturate and may WARN. */
+void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
+{
+ refcount_t neg = REFCOUNT_INIT(0);
+
+ pr_info("attempting bad refcount_dec() below zero\n");
+ refcount_dec(&neg);
+
+ check_negative(&neg, 0);
+}
+
+/*
+ * A refcount_dec_and_test() should act like refcount_dec() above when
+ * going negative.
+ */
+void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
+{
+ refcount_t neg = REFCOUNT_INIT(0);
+
+ pr_info("attempting bad refcount_dec_and_test() below zero\n");
+ if (refcount_dec_and_test(&neg))
+ pr_warn("Weird: refcount_dec_and_test() reported zero\n");
+
+ check_negative(&neg, 0);
+}
+
+/*
+ * A refcount_sub_and_test() should act like refcount_dec_and_test()
+ * above when going negative.
+ */
+void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
+{
+ refcount_t neg = REFCOUNT_INIT(3);
+
+ pr_info("attempting bad refcount_sub_and_test() below zero\n");
+ if (refcount_sub_and_test(5, &neg))
+ pr_warn("Weird: refcount_sub_and_test() reported zero\n");
+
+ check_negative(&neg, 3);
+}
+
+static void check_from_zero(refcount_t *ref)
+{
+ switch (refcount_read(ref)) {
+ case 0:
+ pr_info("Zero detected: stayed at zero\n");
+ break;
+ case REFCOUNT_SATURATED:
+ pr_info("Zero detected: saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Zero detected: unsafely reset to max\n");
+ break;
+ default:
+ pr_info("Fail: zero not detected, incremented to %d\n",
+ refcount_read(ref));
+ }
+}
+
+/*
+ * A refcount_inc() from zero should pin to zero or saturate and may WARN.
+ * Only CONFIG_REFCOUNT_FULL provides this protection currently.
+ */
+void lkdtm_REFCOUNT_INC_ZERO(void)
+{
+ refcount_t zero = REFCOUNT_INIT(0);
+
+ pr_info("attempting safe refcount_inc_not_zero() from zero\n");
+ if (!refcount_inc_not_zero(&zero)) {
+ pr_info("Good: zero detected\n");
+ if (refcount_read(&zero) == 0)
+ pr_info("Correctly stayed at zero\n");
+ else
+ pr_err("Fail: refcount went past zero!\n");
+ } else {
+ pr_err("Fail: Zero not detected!?\n");
+ }
+
+ pr_info("attempting bad refcount_inc() from zero\n");
+ refcount_inc(&zero);
+
+ check_from_zero(&zero);
+}
+
+/*
+ * A refcount_add() should act like refcount_inc() above when starting
+ * at zero.
+ */
+void lkdtm_REFCOUNT_ADD_ZERO(void)
+{
+ refcount_t zero = REFCOUNT_INIT(0);
+
+ pr_info("attempting safe refcount_add_not_zero() from zero\n");
+ if (!refcount_add_not_zero(3, &zero)) {
+ pr_info("Good: zero detected\n");
+ if (refcount_read(&zero) == 0)
+ pr_info("Correctly stayed at zero\n");
+ else
+ pr_err("Fail: refcount went past zero\n");
+ } else {
+ pr_err("Fail: Zero not detected!?\n");
+ }
+
+ pr_info("attempting bad refcount_add() from zero\n");
+ refcount_add(3, &zero);
+
+ check_from_zero(&zero);
+}
+
+static void check_saturated(refcount_t *ref)
+{
+ switch (refcount_read(ref)) {
+ case REFCOUNT_SATURATED:
+ pr_info("Saturation detected: still saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Saturation detected: unsafely reset to max\n");
+ break;
+ default:
+ pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
+ }
+}
+
+/*
+ * A refcount_inc() from a saturated value should at most warn about
+ * being saturated already.
+ */
+void lkdtm_REFCOUNT_INC_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_inc() from saturated\n");
+ refcount_inc(&sat);
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_DEC_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_dec() from saturated\n");
+ refcount_dec(&sat);
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_ADD_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_add() from saturated\n");
+ refcount_add(8, &sat);
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_inc_not_zero() from saturated\n");
+ if (!refcount_inc_not_zero(&sat))
+ pr_warn("Weird: refcount_inc_not_zero() reported zero\n");
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_add_not_zero() from saturated\n");
+ if (!refcount_add_not_zero(7, &sat))
+ pr_warn("Weird: refcount_add_not_zero() reported zero\n");
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_dec_and_test() from saturated\n");
+ if (refcount_dec_and_test(&sat))
+ pr_warn("Weird: refcount_dec_and_test() reported zero\n");
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_sub_and_test() from saturated\n");
+ if (refcount_sub_and_test(8, &sat))
+ pr_warn("Weird: refcount_sub_and_test() reported zero\n");
+
+ check_saturated(&sat);
+}
+
+/* Used to time the existing atomic_t when used for reference counting */
+void lkdtm_ATOMIC_TIMING(void)
+{
+ unsigned int i;
+ atomic_t count = ATOMIC_INIT(1);
+
+ for (i = 0; i < INT_MAX - 1; i++)
+ atomic_inc(&count);
+
+ for (i = INT_MAX; i > 0; i--)
+ if (atomic_dec_and_test(&count))
+ break;
+
+ if (i != 1)
+ pr_err("atomic timing: out of sync up/down cycle: %u\n", i - 1);
+ else
+ pr_info("atomic timing: done\n");
+}
+
+/*
+ * This can be compared to ATOMIC_TIMING when implementing fast refcount
+ * protections. Looking at the number of CPU cycles tells the real story
+ * about performance. For example:
+ * cd /sys/kernel/debug/provoke-crash
+ * perf stat -B -- cat <(echo REFCOUNT_TIMING) > DIRECT
+ */
+void lkdtm_REFCOUNT_TIMING(void)
+{
+ unsigned int i;
+ refcount_t count = REFCOUNT_INIT(1);
+
+ for (i = 0; i < INT_MAX - 1; i++)
+ refcount_inc(&count);
+
+ for (i = INT_MAX; i > 0; i--)
+ if (refcount_dec_and_test(&count))
+ break;
+
+ if (i != 1)
+ pr_err("refcount: out of sync up/down cycle: %u\n", i - 1);
+ else
+ pr_info("refcount timing: done\n");
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This includes functions that are meant to live entirely in .rodata
+ * (via objcopy tricks), to validate the non-executability of .rodata.
+ */
+#include "lkdtm.h"
+
+void notrace lkdtm_rodata_do_nothing(void)
+{
+ /* Does nothing. We just want an architecture agnostic "return". */
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests related to copy_to_user() and copy_from_user()
+ * hardening.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/sched/task_stack.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Many of the tests here end up using const sizes, but those would
+ * normally be ignored by hardened usercopy. To make sure we trigger
+ * the hardened usercopy checks, force the compiler into choosing the
+ * non-const path by adding "unconst" to all the const copies and by
+ * making sure "cache_size" isn't optimized into a const.
+ */
+static volatile size_t unconst = 0;
+static volatile size_t cache_size = 1024;
+static struct kmem_cache *whitelist_cache;
+
+static const unsigned char test_text[] = "This is a test.\n";
+
+/*
+ * Instead of adding -Wno-return-local-addr, just pass the stack address
+ * through a function to obfuscate it from the compiler.
+ */
+static noinline unsigned char *trick_compiler(unsigned char *stack)
+{
+ return stack + 0;
+}
+
+static noinline unsigned char *do_usercopy_stack_callee(int value)
+{
+ unsigned char buf[32];
+ int i;
+
+ /* Exercise stack to avoid everything living in registers. */
+ for (i = 0; i < sizeof(buf); i++) {
+ buf[i] = value & 0xff;
+ }
+
+ return trick_compiler(buf);
+}
+
+static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
+{
+ unsigned long user_addr;
+ unsigned char good_stack[32];
+ unsigned char *bad_stack;
+ int i;
+
+ /* Exercise stack to avoid everything living in registers. */
+ for (i = 0; i < sizeof(good_stack); i++)
+ good_stack[i] = test_text[i % sizeof(test_text)];
+
+ /* This is a pointer to outside our current stack frame. */
+ if (bad_frame) {
+ bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
+ } else {
+ /* Put start address just inside stack. */
+ bad_stack = task_stack_page(current) + THREAD_SIZE;
+ bad_stack -= sizeof(unsigned long);
+ }
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ if (to_user) {
+ pr_info("attempting good copy_to_user of local stack\n");
+ if (copy_to_user((void __user *)user_addr, good_stack,
+ unconst + sizeof(good_stack))) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user of distant stack\n");
+ if (copy_to_user((void __user *)user_addr, bad_stack,
+ unconst + sizeof(good_stack))) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ } else {
+ /*
+ * There isn't a safe way to not be protected by usercopy
+ * if we're going to write to another thread's stack.
+ */
+ if (!bad_frame)
+ goto free_user;
+
+ pr_info("attempting good copy_from_user of local stack\n");
+ if (copy_from_user(good_stack, (void __user *)user_addr,
+ unconst + sizeof(good_stack))) {
+ pr_warn("copy_from_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_from_user of distant stack\n");
+ if (copy_from_user(bad_stack, (void __user *)user_addr,
+ unconst + sizeof(good_stack))) {
+ pr_warn("copy_from_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ }
+
+free_user:
+ vm_munmap(user_addr, PAGE_SIZE);
+}
+
+/*
+ * This checks for whole-object size validation with hardened usercopy,
+ * with or without usercopy whitelisting.
+ */
+static void do_usercopy_heap_size(bool to_user)
+{
+ unsigned long user_addr;
+ unsigned char *one, *two;
+ void __user *test_user_addr;
+ void *test_kern_addr;
+ size_t size = unconst + 1024;
+
+ one = kmalloc(size, GFP_KERNEL);
+ two = kmalloc(size, GFP_KERNEL);
+ if (!one || !two) {
+ pr_warn("Failed to allocate kernel memory\n");
+ goto free_kernel;
+ }
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ goto free_kernel;
+ }
+
+ memset(one, 'A', size);
+ memset(two, 'B', size);
+
+ test_user_addr = (void __user *)(user_addr + 16);
+ test_kern_addr = one + 16;
+
+ if (to_user) {
+ pr_info("attempting good copy_to_user of correct size\n");
+ if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user of too large size\n");
+ if (copy_to_user(test_user_addr, test_kern_addr, size)) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ } else {
+ pr_info("attempting good copy_from_user of correct size\n");
+ if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
+ pr_warn("copy_from_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_from_user of too large size\n");
+ if (copy_from_user(test_kern_addr, test_user_addr, size)) {
+ pr_warn("copy_from_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ }
+
+free_user:
+ vm_munmap(user_addr, PAGE_SIZE);
+free_kernel:
+ kfree(one);
+ kfree(two);
+}
+
+/*
+ * This checks for the specific whitelist window within an object. If this
+ * test passes, then do_usercopy_heap_size() tests will pass too.
+ */
+static void do_usercopy_heap_whitelist(bool to_user)
+{
+ unsigned long user_alloc;
+ unsigned char *buf = NULL;
+ unsigned char __user *user_addr;
+ size_t offset, size;
+
+ /* Make sure cache was prepared. */
+ if (!whitelist_cache) {
+ pr_warn("Failed to allocate kernel cache\n");
+ return;
+ }
+
+ /*
+ * Allocate a buffer with a whitelisted window in the buffer.
+ */
+ buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
+ if (!buf) {
+ pr_warn("Failed to allocate buffer from whitelist cache\n");
+ goto free_alloc;
+ }
+
+ /* Allocate user memory we'll poke at. */
+ user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_alloc >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ goto free_alloc;
+ }
+ user_addr = (void __user *)user_alloc;
+
+ memset(buf, 'B', cache_size);
+
+ /* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
+ offset = (cache_size / 4) + unconst;
+ size = (cache_size / 16) + unconst;
+
+ if (to_user) {
+ pr_info("attempting good copy_to_user inside whitelist\n");
+ if (copy_to_user(user_addr, buf + offset, size)) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user outside whitelist\n");
+ if (copy_to_user(user_addr, buf + offset - 1, size)) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ } else {
+ pr_info("attempting good copy_from_user inside whitelist\n");
+ if (copy_from_user(buf + offset, user_addr, size)) {
+ pr_warn("copy_from_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_from_user outside whitelist\n");
+ if (copy_from_user(buf + offset - 1, user_addr, size)) {
+ pr_warn("copy_from_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ }
+
+free_user:
+ vm_munmap(user_alloc, PAGE_SIZE);
+free_alloc:
+ if (buf)
+ kmem_cache_free(whitelist_cache, buf);
+}
+
+/* Callable tests. */
+void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
+{
+ do_usercopy_heap_size(true);
+}
+
+void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
+{
+ do_usercopy_heap_size(false);
+}
+
+void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
+{
+ do_usercopy_heap_whitelist(true);
+}
+
+void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
+{
+ do_usercopy_heap_whitelist(false);
+}
+
+void lkdtm_USERCOPY_STACK_FRAME_TO(void)
+{
+ do_usercopy_stack(true, true);
+}
+
+void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
+{
+ do_usercopy_stack(false, true);
+}
+
+void lkdtm_USERCOPY_STACK_BEYOND(void)
+{
+ do_usercopy_stack(true, false);
+}
+
+void lkdtm_USERCOPY_KERNEL(void)
+{
+ unsigned long user_addr;
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ pr_info("attempting good copy_to_user from kernel rodata\n");
+ if (copy_to_user((void __user *)user_addr, test_text,
+ unconst + sizeof(test_text))) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user from kernel text\n");
+ if (copy_to_user((void __user *)user_addr, vm_mmap,
+ unconst + PAGE_SIZE)) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+
+free_user:
+ vm_munmap(user_addr, PAGE_SIZE);
+}
+
+void __init lkdtm_usercopy_init(void)
+{
+ /* Prepare a cache with a usercopy whitelist window. */
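+ /*
+  * The trailing arguments are align, flags, useroffset (cache_size / 4),
+  * usersize (cache_size / 16), and ctor; the whitelist window matches
+  * what do_usercopy_heap_whitelist() copies in and out.
+  */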
+ whitelist_cache =
+ kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
+ 0, 0,
+ cache_size / 4,
+ cache_size / 16,
+ NULL);
+}
+
+void __exit lkdtm_usercopy_exit(void)
+{
+ kmem_cache_destroy(whitelist_cache);
+}
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This is for all the tests related to logic bugs (e.g. bad dereferences,
- * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
- * lockups) along with other things that don't fit well into existing LKDTM
- * test source files.
- */
-#include "lkdtm.h"
-#include <linux/list.h>
-#include <linux/sched.h>
-#include <linux/sched/signal.h>
-#include <linux/sched/task_stack.h>
-#include <linux/uaccess.h>
-
-struct lkdtm_list {
- struct list_head node;
-};
-
-/*
- * Make sure our attempts to over run the kernel stack doesn't trigger
- * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
- * recurse past the end of THREAD_SIZE by default.
- */
-#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
-#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
-#else
-#define REC_STACK_SIZE (THREAD_SIZE / 8)
-#endif
-#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
-
-static int recur_count = REC_NUM_DEFAULT;
-
-static DEFINE_SPINLOCK(lock_me_up);
-
-static int recursive_loop(int remaining)
-{
- char buf[REC_STACK_SIZE];
-
- /* Make sure compiler does not optimize this away. */
- memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
- if (!remaining)
- return 0;
- else
- return recursive_loop(remaining - 1);
-}
-
-/* If the depth is negative, use the default, otherwise keep parameter. */
-void __init lkdtm_bugs_init(int *recur_param)
-{
- if (*recur_param < 0)
- *recur_param = recur_count;
- else
- recur_count = *recur_param;
-}
-
-void lkdtm_PANIC(void)
-{
- panic("dumptest");
-}
-
-void lkdtm_BUG(void)
-{
- BUG();
-}
-
-static int warn_counter;
-
-void lkdtm_WARNING(void)
-{
- WARN(1, "Warning message trigger count: %d\n", warn_counter++);
-}
-
-void lkdtm_EXCEPTION(void)
-{
- *((volatile int *) 0) = 0;
-}
-
-void lkdtm_LOOP(void)
-{
- for (;;)
- ;
-}
-
-void lkdtm_OVERFLOW(void)
-{
- (void) recursive_loop(recur_count);
-}
-
-static noinline void __lkdtm_CORRUPT_STACK(void *stack)
-{
- memset(stack, '\xff', 64);
-}
-
-/* This should trip the stack canary, not corrupt the return address. */
-noinline void lkdtm_CORRUPT_STACK(void)
-{
- /* Use default char array length that triggers stack protection. */
- char data[8] __aligned(sizeof(void *));
-
- __lkdtm_CORRUPT_STACK(&data);
-
- pr_info("Corrupted stack containing char array ...\n");
-}
-
-/* Same as above but will only get a canary with -fstack-protector-strong */
-noinline void lkdtm_CORRUPT_STACK_STRONG(void)
-{
- union {
- unsigned short shorts[4];
- unsigned long *ptr;
- } data __aligned(sizeof(void *));
-
- __lkdtm_CORRUPT_STACK(&data);
-
- pr_info("Corrupted stack containing union ...\n");
-}
-
-void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
-{
- static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
- u32 *p;
- u32 val = 0x12345678;
-
- p = (u32 *)(data + 1);
- if (*p == 0)
- val = 0x87654321;
- *p = val;
-}
-
-void lkdtm_SOFTLOCKUP(void)
-{
- preempt_disable();
- for (;;)
- cpu_relax();
-}
-
-void lkdtm_HARDLOCKUP(void)
-{
- local_irq_disable();
- for (;;)
- cpu_relax();
-}
-
-void lkdtm_SPINLOCKUP(void)
-{
- /* Must be called twice to trigger. */
- spin_lock(&lock_me_up);
- /* Let sparse know we intended to exit holding the lock. */
- __release(&lock_me_up);
-}
-
-void lkdtm_HUNG_TASK(void)
-{
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule();
-}
-
-void lkdtm_CORRUPT_LIST_ADD(void)
-{
- /*
- * Initially, an empty list via LIST_HEAD:
- * test_head.next = &test_head
- * test_head.prev = &test_head
- */
- LIST_HEAD(test_head);
- struct lkdtm_list good, bad;
- void *target[2] = { };
- void *redirection = &target;
-
- pr_info("attempting good list addition\n");
-
- /*
- * Adding to the list performs these actions:
- * test_head.next->prev = &good.node
- * good.node.next = test_head.next
- * good.node.prev = &test_head
- * test_head.next = &good.node
- */
- list_add(&good.node, &test_head);
-
- pr_info("attempting corrupted list addition\n");
- /*
- * In simulating this "write-what-where" primitive, the "what" is
- * the address of &bad.node, and the "where" is the address held
- * by "redirection".
- */
- test_head.next = redirection;
- list_add(&bad.node, &test_head);
-
- if (target[0] == NULL && target[1] == NULL)
- pr_err("Overwrite did not happen, but no BUG?!\n");
- else
- pr_err("list_add() corruption not detected!\n");
-}
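-
-/*
- * A sketch (not the exact lib/list_debug.c code) of the CONFIG_DEBUG_LIST
- * sanity check that is expected to reject the redirected "next" above
- * before the write happens:
- *
- *	if (unlikely(next->prev != prev || prev->next != next))
- *		BUG();
- */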
-
-void lkdtm_CORRUPT_LIST_DEL(void)
-{
- LIST_HEAD(test_head);
- struct lkdtm_list item;
- void *target[2] = { };
- void *redirection = &target;
-
- list_add(&item.node, &test_head);
-
- pr_info("attempting good list removal\n");
- list_del(&item.node);
-
- pr_info("attempting corrupted list removal\n");
- list_add(&item.node, &test_head);
-
- /* As with the list_add() test above, this corrupts "next". */
- item.node.next = redirection;
- list_del(&item.node);
-
- if (target[0] == NULL && target[1] == NULL)
- pr_err("Overwrite did not happen, but no BUG?!\n");
- else
- pr_err("list_del() corruption not detected!\n");
-}
-
-/* Test if unbalanced set_fs(KERNEL_DS)/set_fs(USER_DS) check exists. */
-void lkdtm_CORRUPT_USER_DS(void)
-{
- pr_info("setting bad task size limit\n");
- set_fs(KERNEL_DS);
-
- /* Make sure we do not keep running with a KERNEL_DS! */
- force_sig(SIGKILL, current);
-}
-
-/* Test that VMAP_STACK is actually allocating with a leading guard page */
-void lkdtm_STACK_GUARD_PAGE_LEADING(void)
-{
- const unsigned char *stack = task_stack_page(current);
- const unsigned char *ptr = stack - 1;
- volatile unsigned char byte;
-
- pr_info("attempting bad read from page below current stack\n");
-
- byte = *ptr;
-
- pr_err("FAIL: accessed page before stack!\n");
-}
-
-/* Test that VMAP_STACK is actually allocating with a trailing guard page */
-void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
-{
- const unsigned char *stack = task_stack_page(current);
- const unsigned char *ptr = stack + THREAD_SIZE;
- volatile unsigned char byte;
-
- pr_info("attempting bad read from page above current stack\n");
-
- byte = *ptr;
-
- pr_err("FAIL: accessed page after stack!\n");
-}
+++ /dev/null
-/*
- * Linux Kernel Dump Test Module for testing kernel crash conditions:
- * induces system failures at predefined crashpoints and under predefined
- * operational conditions in order to evaluate the reliability of kernel
- * sanity checking and crash dumps obtained using different dumping
- * solutions.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2006
- *
- * Author: Ankita Garg <ankita@in.ibm.com>
- *
- * It is adapted from the Linux Kernel Dump Test Tool by
- * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
- *
- * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
- *
- * See Documentation/fault-injection/provoke-crashes.txt for instructions
- */
-#include "lkdtm.h"
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/buffer_head.h>
-#include <linux/kprobes.h>
-#include <linux/list.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/hrtimer.h>
-#include <linux/slab.h>
-#include <scsi/scsi_cmnd.h>
-#include <linux/debugfs.h>
-
-#ifdef CONFIG_IDE
-#include <linux/ide.h>
-#endif
-
-#define DEFAULT_COUNT 10
-
-static int lkdtm_debugfs_open(struct inode *inode, struct file *file);
-static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
- size_t count, loff_t *off);
-static ssize_t direct_entry(struct file *f, const char __user *user_buf,
- size_t count, loff_t *off);
-
-#ifdef CONFIG_KPROBES
-static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs);
-static ssize_t lkdtm_debugfs_entry(struct file *f,
- const char __user *user_buf,
- size_t count, loff_t *off);
-# define CRASHPOINT_KPROBE(_symbol) \
- .kprobe = { \
- .symbol_name = (_symbol), \
- .pre_handler = lkdtm_kprobe_handler, \
- },
-# define CRASHPOINT_WRITE(_symbol) \
- (_symbol) ? lkdtm_debugfs_entry : direct_entry
-#else
-# define CRASHPOINT_KPROBE(_symbol)
-# define CRASHPOINT_WRITE(_symbol) direct_entry
-#endif
-
-/* Crash points */
-struct crashpoint {
- const char *name;
- const struct file_operations fops;
- struct kprobe kprobe;
-};
-
-#define CRASHPOINT(_name, _symbol) \
- { \
- .name = _name, \
- .fops = { \
- .read = lkdtm_debugfs_read, \
- .llseek = generic_file_llseek, \
- .open = lkdtm_debugfs_open, \
- .write = CRASHPOINT_WRITE(_symbol) \
- }, \
- CRASHPOINT_KPROBE(_symbol) \
- }
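-
-/*
- * For illustration, with CONFIG_KPROBES enabled,
- * CRASHPOINT("INT_HARDWARE_ENTRY", "do_IRQ") expands roughly to:
- *
- *	{
- *		.name = "INT_HARDWARE_ENTRY",
- *		.fops = { .read = lkdtm_debugfs_read,
- *			  .llseek = generic_file_llseek,
- *			  .open = lkdtm_debugfs_open,
- *			  .write = lkdtm_debugfs_entry },
- *		.kprobe = { .symbol_name = "do_IRQ",
- *			    .pre_handler = lkdtm_kprobe_handler },
- *	}
- */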
-
-/* Define the possible places where we can trigger a crash point. */
-static struct crashpoint crashpoints[] = {
- CRASHPOINT("DIRECT", NULL),
-#ifdef CONFIG_KPROBES
- CRASHPOINT("INT_HARDWARE_ENTRY", "do_IRQ"),
- CRASHPOINT("INT_HW_IRQ_EN", "handle_irq_event"),
- CRASHPOINT("INT_TASKLET_ENTRY", "tasklet_action"),
- CRASHPOINT("FS_DEVRW", "ll_rw_block"),
- CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"),
- CRASHPOINT("TIMERADD", "hrtimer_start"),
- CRASHPOINT("SCSI_DISPATCH_CMD", "scsi_dispatch_cmd"),
-# ifdef CONFIG_IDE
- CRASHPOINT("IDE_CORE_CP", "generic_ide_ioctl"),
-# endif
-#endif
-};
-
-
-/* Crash types. */
-struct crashtype {
- const char *name;
- void (*func)(void);
-};
-
-#define CRASHTYPE(_name) \
- { \
- .name = __stringify(_name), \
- .func = lkdtm_ ## _name, \
- }
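-
-/* For example, CRASHTYPE(PANIC) expands to:
- *	{ .name = "PANIC", .func = lkdtm_PANIC }
- */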
-
-/* Define the possible types of crashes that can be triggered. */
-static const struct crashtype crashtypes[] = {
- CRASHTYPE(PANIC),
- CRASHTYPE(BUG),
- CRASHTYPE(WARNING),
- CRASHTYPE(EXCEPTION),
- CRASHTYPE(LOOP),
- CRASHTYPE(OVERFLOW),
- CRASHTYPE(CORRUPT_LIST_ADD),
- CRASHTYPE(CORRUPT_LIST_DEL),
- CRASHTYPE(CORRUPT_USER_DS),
- CRASHTYPE(CORRUPT_STACK),
- CRASHTYPE(CORRUPT_STACK_STRONG),
- CRASHTYPE(STACK_GUARD_PAGE_LEADING),
- CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
- CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
- CRASHTYPE(OVERWRITE_ALLOCATION),
- CRASHTYPE(WRITE_AFTER_FREE),
- CRASHTYPE(READ_AFTER_FREE),
- CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
- CRASHTYPE(READ_BUDDY_AFTER_FREE),
- CRASHTYPE(SOFTLOCKUP),
- CRASHTYPE(HARDLOCKUP),
- CRASHTYPE(SPINLOCKUP),
- CRASHTYPE(HUNG_TASK),
- CRASHTYPE(EXEC_DATA),
- CRASHTYPE(EXEC_STACK),
- CRASHTYPE(EXEC_KMALLOC),
- CRASHTYPE(EXEC_VMALLOC),
- CRASHTYPE(EXEC_RODATA),
- CRASHTYPE(EXEC_USERSPACE),
- CRASHTYPE(ACCESS_USERSPACE),
- CRASHTYPE(WRITE_RO),
- CRASHTYPE(WRITE_RO_AFTER_INIT),
- CRASHTYPE(WRITE_KERN),
- CRASHTYPE(REFCOUNT_INC_OVERFLOW),
- CRASHTYPE(REFCOUNT_ADD_OVERFLOW),
- CRASHTYPE(REFCOUNT_INC_NOT_ZERO_OVERFLOW),
- CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_OVERFLOW),
- CRASHTYPE(REFCOUNT_DEC_ZERO),
- CRASHTYPE(REFCOUNT_DEC_NEGATIVE),
- CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE),
- CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE),
- CRASHTYPE(REFCOUNT_INC_ZERO),
- CRASHTYPE(REFCOUNT_ADD_ZERO),
- CRASHTYPE(REFCOUNT_INC_SATURATED),
- CRASHTYPE(REFCOUNT_DEC_SATURATED),
- CRASHTYPE(REFCOUNT_ADD_SATURATED),
- CRASHTYPE(REFCOUNT_INC_NOT_ZERO_SATURATED),
- CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_SATURATED),
- CRASHTYPE(REFCOUNT_DEC_AND_TEST_SATURATED),
- CRASHTYPE(REFCOUNT_SUB_AND_TEST_SATURATED),
- CRASHTYPE(REFCOUNT_TIMING),
- CRASHTYPE(ATOMIC_TIMING),
- CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
- CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
- CRASHTYPE(USERCOPY_HEAP_WHITELIST_TO),
- CRASHTYPE(USERCOPY_HEAP_WHITELIST_FROM),
- CRASHTYPE(USERCOPY_STACK_FRAME_TO),
- CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
- CRASHTYPE(USERCOPY_STACK_BEYOND),
- CRASHTYPE(USERCOPY_KERNEL),
-};
-
-
-/* Global kprobe entry and crashtype. */
-static struct kprobe *lkdtm_kprobe;
-static struct crashpoint *lkdtm_crashpoint;
-static const struct crashtype *lkdtm_crashtype;
-
-/* Module parameters */
-static int recur_count = -1;
-module_param(recur_count, int, 0644);
-MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
-
-static char* cpoint_name;
-module_param(cpoint_name, charp, 0444);
-MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
-
-static char* cpoint_type;
-module_param(cpoint_type, charp, 0444);
-MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
- "hitting the crash point");
-
-static int cpoint_count = DEFAULT_COUNT;
-module_param(cpoint_count, int, 0644);
-MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
- "crash point is to be hit to trigger action");
-
-
-/* Return the crashtype or NULL if the name is invalid */
-static const struct crashtype *find_crashtype(const char *name)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
- if (!strcmp(name, crashtypes[i].name))
- return &crashtypes[i];
- }
-
- return NULL;
-}
-
-/*
- * This is forced noinline just so it distinctly shows up in the stackdump
- * which makes validation of expected lkdtm crashes easier.
- */
-static noinline void lkdtm_do_action(const struct crashtype *crashtype)
-{
- if (WARN_ON(!crashtype || !crashtype->func))
- return;
- crashtype->func();
-}
-
-static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
- const struct crashtype *crashtype)
-{
- int ret;
-
- /* If this doesn't have a symbol, just call immediately. */
- if (!crashpoint->kprobe.symbol_name) {
- lkdtm_do_action(crashtype);
- return 0;
- }
-
- if (lkdtm_kprobe != NULL)
- unregister_kprobe(lkdtm_kprobe);
-
- lkdtm_crashpoint = crashpoint;
- lkdtm_crashtype = crashtype;
- lkdtm_kprobe = &crashpoint->kprobe;
- ret = register_kprobe(lkdtm_kprobe);
- if (ret < 0) {
- pr_info("Couldn't register kprobe %s\n",
- crashpoint->kprobe.symbol_name);
- lkdtm_kprobe = NULL;
- lkdtm_crashpoint = NULL;
- lkdtm_crashtype = NULL;
- }
-
- return ret;
-}
-
-#ifdef CONFIG_KPROBES
-/* Global crash counter and spinlock. */
-static int crash_count = DEFAULT_COUNT;
-static DEFINE_SPINLOCK(crash_count_lock);
-
-/* Called by kprobe entry points. */
-static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
-{
- unsigned long flags;
- bool do_it = false;
-
- if (WARN_ON(!lkdtm_crashpoint || !lkdtm_crashtype))
- return 0;
-
- spin_lock_irqsave(&crash_count_lock, flags);
- crash_count--;
- pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
- lkdtm_crashpoint->name, lkdtm_crashtype->name, crash_count);
-
- if (crash_count == 0) {
- do_it = true;
- crash_count = cpoint_count;
- }
- spin_unlock_irqrestore(&crash_count_lock, flags);
-
- if (do_it)
- lkdtm_do_action(lkdtm_crashtype);
-
- return 0;
-}
-
-static ssize_t lkdtm_debugfs_entry(struct file *f,
- const char __user *user_buf,
- size_t count, loff_t *off)
-{
- struct crashpoint *crashpoint = file_inode(f)->i_private;
- const struct crashtype *crashtype = NULL;
- char *buf;
- int err;
-
- if (count >= PAGE_SIZE)
- return -EINVAL;
-
- buf = (char *)__get_free_page(GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- if (copy_from_user(buf, user_buf, count)) {
- free_page((unsigned long) buf);
- return -EFAULT;
- }
- /* NUL-terminate and strip the trailing newline */
- buf[count] = '\0';
- strim(buf);
-
- crashtype = find_crashtype(buf);
- free_page((unsigned long)buf);
-
- if (!crashtype)
- return -EINVAL;
-
- err = lkdtm_register_cpoint(crashpoint, crashtype);
- if (err < 0)
- return err;
-
- *off += count;
-
- return count;
-}
-#endif
-
-/* Generic read callback that just prints out the available crash types */
-static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
- size_t count, loff_t *off)
-{
- char *buf;
- int i, n, out;
-
- buf = (char *)__get_free_page(GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
- for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
- n += snprintf(buf + n, PAGE_SIZE - n, "%s\n",
- crashtypes[i].name);
- }
- buf[n] = '\0';
-
- out = simple_read_from_buffer(user_buf, count, off,
- buf, n);
- free_page((unsigned long) buf);
-
- return out;
-}
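-
-/* For example, "cat /sys/kernel/debug/provoke-crash/DIRECT" returns this list. */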
-
-static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-/* Special entry to just crash directly. Available without KPROBEs */
-static ssize_t direct_entry(struct file *f, const char __user *user_buf,
- size_t count, loff_t *off)
-{
- const struct crashtype *crashtype;
- char *buf;
-
- if (count >= PAGE_SIZE)
- return -EINVAL;
- if (count < 1)
- return -EINVAL;
-
- buf = (char *)__get_free_page(GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- if (copy_from_user(buf, user_buf, count)) {
- free_page((unsigned long) buf);
- return -EFAULT;
- }
- /* NUL-terminate and strip the trailing newline */
- buf[count] = '\0';
- strim(buf);
-
- crashtype = find_crashtype(buf);
- free_page((unsigned long) buf);
- if (!crashtype)
- return -EINVAL;
-
- pr_info("Performing direct entry %s\n", crashtype->name);
- lkdtm_do_action(crashtype);
- *off += count;
-
- return count;
-}
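-
-/*
- * Example use of the DIRECT entry above (as root):
- *
- *	echo EXCEPTION > /sys/kernel/debug/provoke-crash/DIRECT
- *
- * looks up the EXCEPTION crashtype and runs it immediately.
- */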
-
-static struct dentry *lkdtm_debugfs_root;
-
-static int __init lkdtm_module_init(void)
-{
- struct crashpoint *crashpoint = NULL;
- const struct crashtype *crashtype = NULL;
- int ret = -EINVAL;
- int i;
-
- /* Neither or both of these need to be set */
- if ((cpoint_type || cpoint_name) && !(cpoint_type && cpoint_name)) {
- pr_err("Need both cpoint_type and cpoint_name or neither\n");
- return -EINVAL;
- }
-
- if (cpoint_type) {
- crashtype = find_crashtype(cpoint_type);
- if (!crashtype) {
- pr_err("Unknown crashtype '%s'\n", cpoint_type);
- return -EINVAL;
- }
- }
-
- if (cpoint_name) {
- for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
- if (!strcmp(cpoint_name, crashpoints[i].name))
- crashpoint = &crashpoints[i];
- }
-
- /* Refuse unknown crashpoints. */
- if (!crashpoint) {
- pr_err("Invalid crashpoint %s\n", cpoint_name);
- return -EINVAL;
- }
- }
-
-#ifdef CONFIG_KPROBES
- /* Set crash count. */
- crash_count = cpoint_count;
-#endif
-
- /* Handle test-specific initialization. */
- lkdtm_bugs_init(&recur_count);
- lkdtm_perms_init();
- lkdtm_usercopy_init();
-
- /* Register debugfs interface */
- lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
- if (!lkdtm_debugfs_root) {
- pr_err("creating root dir failed\n");
- return -ENODEV;
- }
-
- /* Install debugfs trigger files. */
- for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
- struct crashpoint *cur = &crashpoints[i];
- struct dentry *de;
-
- de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
- cur, &cur->fops);
- if (de == NULL) {
- pr_err("could not create crashpoint %s\n", cur->name);
- goto out_err;
- }
- }
-
- /* Install crashpoint if one was selected. */
- if (crashpoint) {
- ret = lkdtm_register_cpoint(crashpoint, crashtype);
- if (ret < 0) {
- pr_info("Invalid crashpoint %s\n", crashpoint->name);
- goto out_err;
- }
- pr_info("Crash point %s of type %s registered\n",
- crashpoint->name, cpoint_type);
- } else {
- pr_info("No crash points registered, enable through debugfs\n");
- }
-
- return 0;
-
-out_err:
- debugfs_remove_recursive(lkdtm_debugfs_root);
- return ret;
-}
-
-static void __exit lkdtm_module_exit(void)
-{
- debugfs_remove_recursive(lkdtm_debugfs_root);
-
- /* Handle test-specific clean-up. */
- lkdtm_usercopy_exit();
-
- if (lkdtm_kprobe != NULL)
- unregister_kprobe(lkdtm_kprobe);
-
- pr_info("Crash point unregistered\n");
-}
-
-module_init(lkdtm_module_init);
-module_exit(lkdtm_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Kernel crash testing module");
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This is for all the tests relating directly to heap memory, including
- * page allocation and slab allocations.
- */
-#include "lkdtm.h"
-#include <linux/slab.h>
-#include <linux/sched.h>
-
-/*
- * This tries to stay within the next largest power-of-2 kmalloc cache
- * to avoid actually overwriting anything important if it's not detected
- * correctly.
- */
-void lkdtm_OVERWRITE_ALLOCATION(void)
-{
- size_t len = 1020;
- u32 *data = kmalloc(len, GFP_KERNEL);
- if (!data)
- return;
-
- data[1024 / sizeof(u32)] = 0x12345678;
- kfree(data);
-}
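-
-/*
- * Note on the arithmetic above: a 1020 byte request is served from the
- * kmalloc-1024 cache, so the u32 store at byte offset 1024 lands just
- * past the end of the object, where slab redzoning (if enabled) can
- * notice it.
- */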
-
-void lkdtm_WRITE_AFTER_FREE(void)
-{
- int *base, *again;
- size_t len = 1024;
- /*
- * The slub allocator uses the first word to store the free
- * pointer in some configurations. Use the middle of the
- * allocation to avoid running into the freelist
- */
- size_t offset = (len / sizeof(*base)) / 2;
-
- base = kmalloc(len, GFP_KERNEL);
- if (!base)
- return;
- pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
- pr_info("Attempting bad write to freed memory at %p\n",
- &base[offset]);
- kfree(base);
- base[offset] = 0x0abcdef0;
- /* Attempt to notice the overwrite. */
- again = kmalloc(len, GFP_KERNEL);
- kfree(again);
- if (again != base)
- pr_info("Hmm, didn't get the same memory range.\n");
-}
-
-void lkdtm_READ_AFTER_FREE(void)
-{
- int *base, *val, saw;
- size_t len = 1024;
- /*
- * The slub allocator uses the first word to store the free
- * pointer in some configurations. Use the middle of the
- * allocation to avoid running into the freelist
- */
- size_t offset = (len / sizeof(*base)) / 2;
-
- base = kmalloc(len, GFP_KERNEL);
- if (!base) {
- pr_info("Unable to allocate base memory.\n");
- return;
- }
-
- val = kmalloc(len, GFP_KERNEL);
- if (!val) {
- pr_info("Unable to allocate val memory.\n");
- kfree(base);
- return;
- }
-
- *val = 0x12345678;
- base[offset] = *val;
- pr_info("Value in memory before free: %x\n", base[offset]);
-
- kfree(base);
-
- pr_info("Attempting bad read from freed memory\n");
- saw = base[offset];
- if (saw != *val) {
- /* Good! Poisoning happened, so declare a win. */
- pr_info("Memory correctly poisoned (%x)\n", saw);
- BUG();
- }
- pr_info("Memory was not poisoned\n");
-
- kfree(val);
-}
-
-void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
-{
- unsigned long p = __get_free_page(GFP_KERNEL);
- if (!p) {
- pr_info("Unable to allocate free page\n");
- return;
- }
-
- pr_info("Writing to the buddy page before free\n");
- memset((void *)p, 0x3, PAGE_SIZE);
- free_page(p);
- schedule();
- pr_info("Attempting bad write to the buddy page after free\n");
- memset((void *)p, 0x78, PAGE_SIZE);
- /* Attempt to notice the overwrite. */
- p = __get_free_page(GFP_KERNEL);
- free_page(p);
- schedule();
-}
-
-void lkdtm_READ_BUDDY_AFTER_FREE(void)
-{
- unsigned long p = __get_free_page(GFP_KERNEL);
- int saw, *val;
- int *base;
-
- if (!p) {
- pr_info("Unable to allocate free page\n");
- return;
- }
-
- val = kmalloc(1024, GFP_KERNEL);
- if (!val) {
- pr_info("Unable to allocate val memory.\n");
- free_page(p);
- return;
- }
-
- base = (int *)p;
-
- *val = 0x12345678;
- base[0] = *val;
- pr_info("Value in memory before free: %x\n", base[0]);
- free_page(p);
- pr_info("Attempting to read from freed memory\n");
- saw = base[0];
- if (saw != *val) {
- /* Good! Poisoning happened, so declare a win. */
- pr_info("Memory correctly poisoned (%x)\n", saw);
- BUG();
- }
- pr_info("Buddy page was not poisoned\n");
-
- kfree(val);
-}
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This is for all the tests related to validating kernel memory
- * permissions: non-executable regions, non-writable regions, and
- * even non-readable regions.
- */
-#include "lkdtm.h"
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/mman.h>
-#include <linux/uaccess.h>
-#include <asm/cacheflush.h>
-
-/* Whether or not to fill the target memory area with do_nothing(). */
-#define CODE_WRITE true
-#define CODE_AS_IS false
-
-/* How many bytes to copy to be sure we've copied enough of do_nothing(). */
-#define EXEC_SIZE 64
-
-/* This is non-const, so it will end up in the .data section. */
-static u8 data_area[EXEC_SIZE];
-
-/* This is const, so it will end up in the .rodata section. */
-static const unsigned long rodata = 0xAA55AA55;
-
-/* This is marked __ro_after_init, so it should ultimately be .rodata. */
-static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
-
-/*
- * This just returns to the caller. It is designed to be copied into
- * non-executable memory regions.
- */
-static void do_nothing(void)
-{
- return;
-}
-
-/* Must immediately follow do_nothing for size calculations to work out. */
-static void do_overwritten(void)
-{
- pr_info("do_overwritten wasn't overwritten!\n");
- return;
-}
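-
-/*
- * The "immediately follow" requirement exists because lkdtm_WRITE_KERN()
- * below sizes its copy as do_overwritten - do_nothing, which assumes the
- * compiler emits the two functions adjacently and in declaration order.
- */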
-
-static noinline void execute_location(void *dst, bool write)
-{
- void (*func)(void) = dst;
-
- pr_info("attempting ok execution at %p\n", do_nothing);
- do_nothing();
-
- if (write == CODE_WRITE) {
- memcpy(dst, do_nothing, EXEC_SIZE);
- flush_icache_range((unsigned long)dst,
- (unsigned long)dst + EXEC_SIZE);
- }
- pr_info("attempting bad execution at %p\n", func);
- func();
-}
-
-static void execute_user_location(void *dst)
-{
- int copied;
-
- /* Intentionally crossing kernel/user memory boundary. */
- void (*func)(void) = dst;
-
- pr_info("attempting ok execution at %p\n", do_nothing);
- do_nothing();
-
- copied = access_process_vm(current, (unsigned long)dst, do_nothing,
- EXEC_SIZE, FOLL_WRITE);
- if (copied < EXEC_SIZE)
- return;
- pr_info("attempting bad execution at %p\n", func);
- func();
-}
-
-void lkdtm_WRITE_RO(void)
-{
- /* Explicitly cast away "const" for the test. */
- unsigned long *ptr = (unsigned long *)&rodata;
-
- pr_info("attempting bad rodata write at %p\n", ptr);
- *ptr ^= 0xabcd1234;
-}
-
-void lkdtm_WRITE_RO_AFTER_INIT(void)
-{
- unsigned long *ptr = &ro_after_init;
-
- /*
- * Verify the value was written during init. Since an Oops
- * is considered a "success", a failure here just skips the
- * real test.
- */
- if ((*ptr & 0xAA) != 0xAA) {
- pr_info("%p was NOT written during init!?\n", ptr);
- return;
- }
-
- pr_info("attempting bad ro_after_init write at %p\n", ptr);
- *ptr ^= 0xabcd1234;
-}
-
-void lkdtm_WRITE_KERN(void)
-{
- size_t size;
- unsigned char *ptr;
-
- size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
- ptr = (unsigned char *)do_overwritten;
-
- pr_info("attempting bad %zu byte write at %p\n", size, ptr);
- memcpy(ptr, (unsigned char *)do_nothing, size);
- flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
-
- do_overwritten();
-}
-
-void lkdtm_EXEC_DATA(void)
-{
- execute_location(data_area, CODE_WRITE);
-}
-
-void lkdtm_EXEC_STACK(void)
-{
- u8 stack_area[EXEC_SIZE];
- execute_location(stack_area, CODE_WRITE);
-}
-
-void lkdtm_EXEC_KMALLOC(void)
-{
- u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
- execute_location(kmalloc_area, CODE_WRITE);
- kfree(kmalloc_area);
-}
-
-void lkdtm_EXEC_VMALLOC(void)
-{
- u32 *vmalloc_area = vmalloc(EXEC_SIZE);
- execute_location(vmalloc_area, CODE_WRITE);
- vfree(vmalloc_area);
-}
-
-void lkdtm_EXEC_RODATA(void)
-{
- execute_location(lkdtm_rodata_do_nothing, CODE_AS_IS);
-}
-
-void lkdtm_EXEC_USERSPACE(void)
-{
- unsigned long user_addr;
-
- user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_ANONYMOUS | MAP_PRIVATE, 0);
- if (user_addr >= TASK_SIZE) {
- pr_warn("Failed to allocate user memory\n");
- return;
- }
- execute_user_location((void *)user_addr);
- vm_munmap(user_addr, PAGE_SIZE);
-}
-
-void lkdtm_ACCESS_USERSPACE(void)
-{
- unsigned long user_addr, tmp = 0;
- unsigned long *ptr;
-
- user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_ANONYMOUS | MAP_PRIVATE, 0);
- if (user_addr >= TASK_SIZE) {
- pr_warn("Failed to allocate user memory\n");
- return;
- }
-
- if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
- pr_warn("copy_to_user failed\n");
- vm_munmap(user_addr, PAGE_SIZE);
- return;
- }
-
- ptr = (unsigned long *)user_addr;
-
- pr_info("attempting bad read at %p\n", ptr);
- tmp = *ptr;
- tmp += 0xc0dec0de;
-
- pr_info("attempting bad write at %p\n", ptr);
- *ptr = tmp;
-
- vm_munmap(user_addr, PAGE_SIZE);
-}
-
-void __init lkdtm_perms_init(void)
-{
- /* Make sure we can write to __ro_after_init values during __init */
- ro_after_init |= 0xAA;
-
-}
+++ /dev/null
-/*
- * This is for all the tests related to refcount bugs (e.g. overflow,
- * underflow, reaching zero untested, etc).
- */
-#include "lkdtm.h"
-#include <linux/refcount.h>
-
-#ifdef CONFIG_REFCOUNT_FULL
-#define REFCOUNT_MAX (UINT_MAX - 1)
-#define REFCOUNT_SATURATED UINT_MAX
-#else
-#define REFCOUNT_MAX INT_MAX
-#define REFCOUNT_SATURATED (INT_MIN / 2)
-#endif
-
-static void overflow_check(refcount_t *ref)
-{
- switch (refcount_read(ref)) {
- case REFCOUNT_SATURATED:
- pr_info("Overflow detected: saturated\n");
- break;
- case REFCOUNT_MAX:
- pr_warn("Overflow detected: unsafely reset to max\n");
- break;
- default:
- pr_err("Fail: refcount wrapped to %d\n", refcount_read(ref));
- }
-}
-
-/*
- * A refcount_inc() above the maximum value of the refcount implementation,
- * should at least saturate, and at most also WARN.
- */
-void lkdtm_REFCOUNT_INC_OVERFLOW(void)
-{
- refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
-
- pr_info("attempting good refcount_inc() without overflow\n");
- refcount_dec(&over);
- refcount_inc(&over);
-
- pr_info("attempting bad refcount_inc() overflow\n");
- refcount_inc(&over);
- refcount_inc(&over);
-
- overflow_check(&over);
-}
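-
-/*
- * Walking through the arithmetic above: the counter starts at
- * REFCOUNT_MAX - 1, the dec/inc pair leaves it there, the first "bad"
- * inc legitimately reaches REFCOUNT_MAX, and the second inc is the
- * overflow the implementation is expected to catch.
- */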
-
-/* refcount_add() should behave just like refcount_inc() above. */
-void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
-{
- refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
-
- pr_info("attempting good refcount_add() without overflow\n");
- refcount_dec(&over);
- refcount_dec(&over);
- refcount_dec(&over);
- refcount_dec(&over);
- refcount_add(4, &over);
-
- pr_info("attempting bad refcount_add() overflow\n");
- refcount_add(4, &over);
-
- overflow_check(&over);
-}
-
-/* refcount_inc_not_zero() should behave just like refcount_inc() above. */
-void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
-{
- refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
-
- pr_info("attempting bad refcount_inc_not_zero() overflow\n");
- if (!refcount_inc_not_zero(&over))
- pr_warn("Weird: refcount_inc_not_zero() reported zero\n");
-
- overflow_check(&over);
-}
-
-/* refcount_add_not_zero() should behave just like refcount_inc() above. */
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void)
-{
- refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
-
- pr_info("attempting bad refcount_add_not_zero() overflow\n");
- if (!refcount_add_not_zero(6, &over))
- pr_warn("Weird: refcount_add_not_zero() reported zero\n");
-
- overflow_check(&over);
-}
-
-static void check_zero(refcount_t *ref)
-{
- switch (refcount_read(ref)) {
- case REFCOUNT_SATURATED:
- pr_info("Zero detected: saturated\n");
- break;
- case REFCOUNT_MAX:
- pr_warn("Zero detected: unsafely reset to max\n");
- break;
- case 0:
- pr_warn("Still at zero: refcount_inc/add() must not inc-from-0\n");
- break;
- default:
- pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
- }
-}
-
-/*
- * A refcount_dec(), as opposed to a refcount_dec_and_test(), when it hits
- * zero it should either saturate (when inc-from-zero isn't protected)
- * or stay at zero (when inc-from-zero is protected) and should WARN for both.
- */
-void lkdtm_REFCOUNT_DEC_ZERO(void)
-{
- refcount_t zero = REFCOUNT_INIT(2);
-
- pr_info("attempting good refcount_dec()\n");
- refcount_dec(&zero);
-
- pr_info("attempting bad refcount_dec() to zero\n");
- refcount_dec(&zero);
-
- check_zero(&zero);
-}
-
-static void check_negative(refcount_t *ref, int start)
-{
- /*
- * CONFIG_REFCOUNT_FULL refuses to move a refcount at all on an
- * over-sub, so we have to track our starting position instead of
- * looking only at zero-pinning.
- */
- if (refcount_read(ref) == start) {
- pr_warn("Still at %d: refcount_inc/add() must not inc-from-0\n",
- start);
- return;
- }
-
- switch (refcount_read(ref)) {
- case REFCOUNT_SATURATED:
- pr_info("Negative detected: saturated\n");
- break;
- case REFCOUNT_MAX:
- pr_warn("Negative detected: unsafely reset to max\n");
- break;
- default:
- pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
- }
-}
-
-/* A refcount_dec() going negative should saturate and may WARN. */
-void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
-{
- refcount_t neg = REFCOUNT_INIT(0);
-
- pr_info("attempting bad refcount_dec() below zero\n");
- refcount_dec(&neg);
-
- check_negative(&neg, 0);
-}
-
-/*
- * A refcount_dec_and_test() should act like refcount_dec() above when
- * going negative.
- */
-void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
-{
- refcount_t neg = REFCOUNT_INIT(0);
-
- pr_info("attempting bad refcount_dec_and_test() below zero\n");
- if (refcount_dec_and_test(&neg))
- pr_warn("Weird: refcount_dec_and_test() reported zero\n");
-
- check_negative(&neg, 0);
-}
-
-/*
- * A refcount_sub_and_test() should act like refcount_dec_and_test()
- * above when going negative.
- */
-void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
-{
- refcount_t neg = REFCOUNT_INIT(3);
-
- pr_info("attempting bad refcount_sub_and_test() below zero\n");
- if (refcount_sub_and_test(5, &neg))
- pr_warn("Weird: refcount_sub_and_test() reported zero\n");
-
- check_negative(&neg, 3);
-}
-
-static void check_from_zero(refcount_t *ref)
-{
- switch (refcount_read(ref)) {
- case 0:
- pr_info("Zero detected: stayed at zero\n");
- break;
- case REFCOUNT_SATURATED:
- pr_info("Zero detected: saturated\n");
- break;
- case REFCOUNT_MAX:
- pr_warn("Zero detected: unsafely reset to max\n");
- break;
- default:
- pr_info("Fail: zero not detected, incremented to %d\n",
- refcount_read(ref));
- }
-}
-
-/*
- * A refcount_inc() from zero should pin to zero or saturate and may WARN.
- * Only CONFIG_REFCOUNT_FULL provides this protection currently.
- */
-void lkdtm_REFCOUNT_INC_ZERO(void)
-{
- refcount_t zero = REFCOUNT_INIT(0);
-
- pr_info("attempting safe refcount_inc_not_zero() from zero\n");
- if (!refcount_inc_not_zero(&zero)) {
- pr_info("Good: zero detected\n");
- if (refcount_read(&zero) == 0)
- pr_info("Correctly stayed at zero\n");
- else
- pr_err("Fail: refcount went past zero!\n");
- } else {
- pr_err("Fail: Zero not detected!?\n");
- }
-
- pr_info("attempting bad refcount_inc() from zero\n");
- refcount_inc(&zero);
-
- check_from_zero(&zero);
-}
-
-/*
- * A refcount_add() should act like refcount_inc() above when starting
- * at zero.
- */
-void lkdtm_REFCOUNT_ADD_ZERO(void)
-{
- refcount_t zero = REFCOUNT_INIT(0);
-
- pr_info("attempting safe refcount_add_not_zero() from zero\n");
- if (!refcount_add_not_zero(3, &zero)) {
- pr_info("Good: zero detected\n");
- if (refcount_read(&zero) == 0)
- pr_info("Correctly stayed at zero\n");
- else
- pr_err("Fail: refcount went past zero\n");
- } else {
- pr_err("Fail: Zero not detected!?\n");
- }
-
- pr_info("attempting bad refcount_add() from zero\n");
- refcount_add(3, &zero);
-
- check_from_zero(&zero);
-}
-
-static void check_saturated(refcount_t *ref)
-{
- switch (refcount_read(ref)) {
- case REFCOUNT_SATURATED:
- pr_info("Saturation detected: still saturated\n");
- break;
- case REFCOUNT_MAX:
- pr_warn("Saturation detected: unsafely reset to max\n");
- break;
- default:
- pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
- }
-}
-
-/*
- * A refcount_inc() from a saturated value should at most warn about
- * being saturated already.
- */
-void lkdtm_REFCOUNT_INC_SATURATED(void)
-{
- refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
-
- pr_info("attempting bad refcount_inc() from saturated\n");
- refcount_inc(&sat);
-
- check_saturated(&sat);
-}
-
-/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_DEC_SATURATED(void)
-{
- refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
-
- pr_info("attempting bad refcount_dec() from saturated\n");
- refcount_dec(&sat);
-
- check_saturated(&sat);
-}
-
-/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_ADD_SATURATED(void)
-{
- refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
-
- pr_info("attempting bad refcount_dec() from saturated\n");
- refcount_add(8, &sat);
-
- check_saturated(&sat);
-}
-
-/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
-{
- refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
-
- pr_info("attempting bad refcount_inc_not_zero() from saturated\n");
- if (!refcount_inc_not_zero(&sat))
- pr_warn("Weird: refcount_inc_not_zero() reported zero\n");
-
- check_saturated(&sat);
-}
-
-/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
-{
- refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
-
- pr_info("attempting bad refcount_add_not_zero() from saturated\n");
- if (!refcount_add_not_zero(7, &sat))
- pr_warn("Weird: refcount_add_not_zero() reported zero\n");
-
- check_saturated(&sat);
-}
-
-/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
-{
- refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
-
- pr_info("attempting bad refcount_dec_and_test() from saturated\n");
- if (refcount_dec_and_test(&sat))
- pr_warn("Weird: refcount_dec_and_test() reported zero\n");
-
- check_saturated(&sat);
-}
-
-/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
-{
- refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
-
- pr_info("attempting bad refcount_sub_and_test() from saturated\n");
- if (refcount_sub_and_test(8, &sat))
- pr_warn("Weird: refcount_sub_and_test() reported zero\n");
-
- check_saturated(&sat);
-}
-
-/* Used to time the existing atomic_t when used for reference counting */
-void lkdtm_ATOMIC_TIMING(void)
-{
- unsigned int i;
- atomic_t count = ATOMIC_INIT(1);
-
- for (i = 0; i < INT_MAX - 1; i++)
- atomic_inc(&count);
-
- for (i = INT_MAX; i > 0; i--)
- if (atomic_dec_and_test(&count))
- break;
-
- if (i != 1)
- pr_err("atomic timing: out of sync up/down cycle: %u\n", i - 1);
- else
- pr_info("atomic timing: done\n");
-}
-
-/*
- * This can be compared to ATOMIC_TIMING when implementing fast refcount
- * protections. Looking at the number of CPU cycles tells the real story
- * about performance. For example:
- * cd /sys/kernel/debug/provoke-crash
- * perf stat -B -- cat <(echo REFCOUNT_TIMING) > DIRECT
- */
-void lkdtm_REFCOUNT_TIMING(void)
-{
- unsigned int i;
- refcount_t count = REFCOUNT_INIT(1);
-
- for (i = 0; i < INT_MAX - 1; i++)
- refcount_inc(&count);
-
- for (i = INT_MAX; i > 0; i--)
- if (refcount_dec_and_test(&count))
- break;
-
- if (i != 1)
- pr_err("refcount: out of sync up/down cycle: %u\n", i - 1);
- else
- pr_info("refcount timing: done\n");
-}
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This includes functions that are meant to live entirely in .rodata
- * (via objcopy tricks), to validate the non-executability of .rodata.
- */
-#include "lkdtm.h"
-
-void notrace lkdtm_rodata_do_nothing(void)
-{
- /* Does nothing. We just want an architecture agnostic "return". */
-}
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This is for all the tests related to copy_to_user() and copy_from_user()
- * hardening.
- */
-#include "lkdtm.h"
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/sched/task_stack.h>
-#include <linux/mman.h>
-#include <linux/uaccess.h>
-#include <asm/cacheflush.h>
-
-/*
- * Many of the tests here end up using const sizes, but those would
- * normally be ignored by hardened usercopy, so force the compiler
- * into choosing the non-const path to make sure we trigger the
- * hardened usercopy checks by adding "unconst" to all the const copies,
- * and making sure "cache_size" isn't optimized into a const.
- */
-static volatile size_t unconst = 0;
-static volatile size_t cache_size = 1024;
-static struct kmem_cache *whitelist_cache;
-
-static const unsigned char test_text[] = "This is a test.\n";
-
-/*
- * Instead of adding -Wno-return-local-addr, just pass the stack address
- * through a function to obfuscate it from the compiler.
- */
-static noinline unsigned char *trick_compiler(unsigned char *stack)
-{
- return stack + 0;
-}
-
-static noinline unsigned char *do_usercopy_stack_callee(int value)
-{
- unsigned char buf[32];
- int i;
-
- /* Exercise stack to avoid everything living in registers. */
- for (i = 0; i < sizeof(buf); i++) {
- buf[i] = value & 0xff;
- }
-
- return trick_compiler(buf);
-}
-
-static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
-{
- unsigned long user_addr;
- unsigned char good_stack[32];
- unsigned char *bad_stack;
- int i;
-
- /* Exercise stack to avoid everything living in registers. */
- for (i = 0; i < sizeof(good_stack); i++)
- good_stack[i] = test_text[i % sizeof(test_text)];
-
- /* This is a pointer to outside our current stack frame. */
- if (bad_frame) {
- bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
- } else {
- /* Put start address just inside stack. */
- bad_stack = task_stack_page(current) + THREAD_SIZE;
- bad_stack -= sizeof(unsigned long);
- }
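-
- /*
- * Note: in the bad_frame case the callee has already returned, so
- * bad_stack points at a dead frame below the live ones; this is what
- * arch_within_stack_frames() is expected to reject on architectures
- * that can walk their stack frames.
- */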
-
- user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_ANONYMOUS | MAP_PRIVATE, 0);
- if (user_addr >= TASK_SIZE) {
- pr_warn("Failed to allocate user memory\n");
- return;
- }
-
- if (to_user) {
- pr_info("attempting good copy_to_user of local stack\n");
- if (copy_to_user((void __user *)user_addr, good_stack,
- unconst + sizeof(good_stack))) {
- pr_warn("copy_to_user failed unexpectedly?!\n");
- goto free_user;
- }
-
- pr_info("attempting bad copy_to_user of distant stack\n");
- if (copy_to_user((void __user *)user_addr, bad_stack,
- unconst + sizeof(good_stack))) {
- pr_warn("copy_to_user failed, but lacked Oops\n");
- goto free_user;
- }
- } else {
- /*
- * There isn't a safe way to not be protected by usercopy
- * if we're going to write to another thread's stack.
- */
- if (!bad_frame)
- goto free_user;
-
- pr_info("attempting good copy_from_user of local stack\n");
- if (copy_from_user(good_stack, (void __user *)user_addr,
- unconst + sizeof(good_stack))) {
- pr_warn("copy_from_user failed unexpectedly?!\n");
- goto free_user;
- }
-
- pr_info("attempting bad copy_from_user of distant stack\n");
- if (copy_from_user(bad_stack, (void __user *)user_addr,
- unconst + sizeof(good_stack))) {
- pr_warn("copy_from_user failed, but lacked Oops\n");
- goto free_user;
- }
- }
-
-free_user:
- vm_munmap(user_addr, PAGE_SIZE);
-}
-
-/*
- * This checks for whole-object size validation with hardened usercopy,
- * with or without usercopy whitelisting.
- */
-static void do_usercopy_heap_size(bool to_user)
-{
- unsigned long user_addr;
- unsigned char *one, *two;
- void __user *test_user_addr;
- void *test_kern_addr;
- size_t size = unconst + 1024;
-
- one = kmalloc(size, GFP_KERNEL);
- two = kmalloc(size, GFP_KERNEL);
- if (!one || !two) {
- pr_warn("Failed to allocate kernel memory\n");
- goto free_kernel;
- }
-
- user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_ANONYMOUS | MAP_PRIVATE, 0);
- if (user_addr >= TASK_SIZE) {
- pr_warn("Failed to allocate user memory\n");
- goto free_kernel;
- }
-
- memset(one, 'A', size);
- memset(two, 'B', size);
-
- test_user_addr = (void __user *)(user_addr + 16);
- test_kern_addr = one + 16;
-
- if (to_user) {
- pr_info("attempting good copy_to_user of correct size\n");
- if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
- pr_warn("copy_to_user failed unexpectedly?!\n");
- goto free_user;
- }
-
- pr_info("attempting bad copy_to_user of too large size\n");
- if (copy_to_user(test_user_addr, test_kern_addr, size)) {
- pr_warn("copy_to_user failed, but lacked Oops\n");
- goto free_user;
- }
- } else {
- pr_info("attempting good copy_from_user of correct size\n");
- if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
- pr_warn("copy_from_user failed unexpectedly?!\n");
- goto free_user;
- }
-
- pr_info("attempting bad copy_from_user of too large size\n");
- if (copy_from_user(test_kern_addr, test_user_addr, size)) {
- pr_warn("copy_from_user failed, but lacked Oops\n");
- goto free_user;
- }
- }
-
-free_user:
- vm_munmap(user_addr, PAGE_SIZE);
-free_kernel:
- kfree(one);
- kfree(two);
-}
-
-/*
- * This checks for the specific whitelist window within an object. If this
- * test passes, then do_usercopy_heap_size() tests will pass too.
- */
-static void do_usercopy_heap_whitelist(bool to_user)
-{
- unsigned long user_alloc;
- unsigned char *buf = NULL;
- unsigned char __user *user_addr;
- size_t offset, size;
-
- /* Make sure cache was prepared. */
- if (!whitelist_cache) {
- pr_warn("Failed to allocate kernel cache\n");
- return;
- }
-
- /*
- * Allocate a buffer with a whitelisted window in the buffer.
- */
- buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
- if (!buf) {
- pr_warn("Failed to allocate buffer from whitelist cache\n");
- goto free_alloc;
- }
-
- /* Allocate user memory we'll poke at. */
- user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_ANONYMOUS | MAP_PRIVATE, 0);
- if (user_alloc >= TASK_SIZE) {
- pr_warn("Failed to allocate user memory\n");
- goto free_alloc;
- }
- user_addr = (void __user *)user_alloc;
-
- memset(buf, 'B', cache_size);
-
- /* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
- offset = (cache_size / 4) + unconst;
- size = (cache_size / 16) + unconst;
-
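- /*
- * With the default cache_size of 1024 the whitelist window is bytes
- * [256, 320): the good copies below start at buf + offset (256) and fit
- * inside it, while the bad ones start one byte earlier at buf + 255 and
- * should trip hardened usercopy.
- */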
- if (to_user) {
- pr_info("attempting good copy_to_user inside whitelist\n");
- if (copy_to_user(user_addr, buf + offset, size)) {
- pr_warn("copy_to_user failed unexpectedly?!\n");
- goto free_user;
- }
-
- pr_info("attempting bad copy_to_user outside whitelist\n");
- if (copy_to_user(user_addr, buf + offset - 1, size)) {
- pr_warn("copy_to_user failed, but lacked Oops\n");
- goto free_user;
- }
- } else {
- pr_info("attempting good copy_from_user inside whitelist\n");
- if (copy_from_user(buf + offset, user_addr, size)) {
- pr_warn("copy_from_user failed unexpectedly?!\n");
- goto free_user;
- }
-
- pr_info("attempting bad copy_from_user outside whitelist\n");
- if (copy_from_user(buf + offset - 1, user_addr, size)) {
- pr_warn("copy_from_user failed, but lacked Oops\n");
- goto free_user;
- }
- }
-
-free_user:
- vm_munmap(user_alloc, PAGE_SIZE);
-free_alloc:
- if (buf)
- kmem_cache_free(whitelist_cache, buf);
-}
-
-/* Callable tests. */
-void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
-{
- do_usercopy_heap_size(true);
-}
-
-void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
-{
- do_usercopy_heap_size(false);
-}
-
-void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
-{
- do_usercopy_heap_whitelist(true);
-}
-
-void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
-{
- do_usercopy_heap_whitelist(false);
-}
-
-void lkdtm_USERCOPY_STACK_FRAME_TO(void)
-{
- do_usercopy_stack(true, true);
-}
-
-void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
-{
- do_usercopy_stack(false, true);
-}
-
-void lkdtm_USERCOPY_STACK_BEYOND(void)
-{
- do_usercopy_stack(true, false);
-}
-
-void lkdtm_USERCOPY_KERNEL(void)
-{
- unsigned long user_addr;
-
- user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_ANONYMOUS | MAP_PRIVATE, 0);
- if (user_addr >= TASK_SIZE) {
- pr_warn("Failed to allocate user memory\n");
- return;
- }
-
- pr_info("attempting good copy_to_user from kernel rodata\n");
- if (copy_to_user((void __user *)user_addr, test_text,
- unconst + sizeof(test_text))) {
- pr_warn("copy_to_user failed unexpectedly?!\n");
- goto free_user;
- }
-
- pr_info("attempting bad copy_to_user from kernel text\n");
- if (copy_to_user((void __user *)user_addr, vm_mmap,
- unconst + PAGE_SIZE)) {
- pr_warn("copy_to_user failed, but lacked Oops\n");
- goto free_user;
- }
-
-free_user:
- vm_munmap(user_addr, PAGE_SIZE);
-}
-
-void __init lkdtm_usercopy_init(void)
-{
- /* Prepare a cache with a usercopy whitelist window. */
- whitelist_cache =
- kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
- 0, 0,
- cache_size / 4,
- cache_size / 16,
- NULL);
-}
-
-void __exit lkdtm_usercopy_exit(void)
-{
- kmem_cache_destroy(whitelist_cache);
-}