* - Incorporating suggestions made by Linus Torvalds and Dave Miller
*/
#ifdef __KERNEL__
+
#include <asm/page.h>
+/*
+ * Page fault error code bits
+ *
+ * NOTE(review): these codes are also stashed in the upper byte of
+ * thread_info->flags (thread flags are limited to 24 bits for this
+ * reason), so the set must fit within 8 bits.
+ */
+#define FAULT_CODE_WRITE (1 << 0) /* write access */
+#define FAULT_CODE_INITIAL (1 << 1) /* initial page write */
+#define FAULT_CODE_ITLB (1 << 2) /* ITLB miss */
+#define FAULT_CODE_PROT (1 << 3) /* protection fault */
+#define FAULT_CODE_USER (1 << 4) /* user-mode access */
+
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#endif /* __ASSEMBLY__ */
/*
- * thread information flags
- * - these are process state flags that various assembly files may need to access
- * - pending work-to-be-done flags are in LSW
- * - other flags in MSW
+ * Thread information flags
+ *
+ * - Limited to 24 bits, upper byte used for fault code encoding.
+ *
+ * - _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or
+ * we blow the tst immediate size constraints and need to fix up
+ * arch/sh/kernel/entry-common.S.
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
-/*
- * _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or we
- * blow the tst immediate size constraints and need to fix up
- * arch/sh/kernel/entry-common.S.
- */
-
/* work to do in syscall trace */
#define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
_TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
#define TS_USEDFPU 0x0002 /* FPU used by this task this quantum */
#ifndef __ASSEMBLY__
+
#define HAVE_SET_RESTORE_SIGMASK 1
static inline void set_restore_sigmask(void)
{
ti->status |= TS_RESTORE_SIGMASK;
set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
}
+
+#define TI_FLAG_FAULT_CODE_SHIFT 24
+
+/*
+ * Additional thread flag encoding: stash the page fault code in the
+ * upper byte of ti->flags (bits TI_FLAG_FAULT_CODE_SHIFT..31),
+ * clearing any previously stored code first.
+ */
+static inline void set_thread_fault_code(unsigned int val)
+{
+	struct thread_info *ti = current_thread_info();
+
+	/*
+	 * The mask constant must be unsigned: ~0 is a negative int, and
+	 * right-shifting a negative value is implementation-defined (an
+	 * arithmetic shift yields all ones, which would fail to clear the
+	 * stale fault code before OR-ing in the new one).
+	 */
+	ti->flags = (ti->flags & (~0U >> (32 - TI_FLAG_FAULT_CODE_SHIFT)))
+		| (val << TI_FLAG_FAULT_CODE_SHIFT);
+}
+
+/*
+ * Retrieve the fault code previously stashed in the upper byte of
+ * the current thread's flags by set_thread_fault_code().
+ */
+static inline unsigned int get_thread_fault_code(void)
+{
+	return current_thread_info()->flags >> TI_FLAG_FAULT_CODE_SHIFT;
+}
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
}
static noinline void
-no_context(struct pt_regs *regs, unsigned long writeaccess,
+no_context(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
/* Are we prepared to handle this kernel fault? */
show_fault_oops(regs, address);
- die("Oops", regs, writeaccess);
+ die("Oops", regs, error_code);
bust_spinlocks(0);
do_exit(SIGKILL);
}
static void
-__bad_area_nosemaphore(struct pt_regs *regs, unsigned long writeaccess,
+__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
unsigned long address, int si_code)
{
struct task_struct *tsk = current;
return;
}
- no_context(regs, writeaccess, address);
+ no_context(regs, error_code, address);
}
static noinline void
-bad_area_nosemaphore(struct pt_regs *regs, unsigned long writeaccess,
+bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
- __bad_area_nosemaphore(regs, writeaccess, address, SEGV_MAPERR);
+ __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}
static void
-__bad_area(struct pt_regs *regs, unsigned long writeaccess,
+__bad_area(struct pt_regs *regs, unsigned long error_code,
unsigned long address, int si_code)
{
struct mm_struct *mm = current->mm;
*/
up_read(&mm->mmap_sem);
- __bad_area_nosemaphore(regs, writeaccess, address, si_code);
+ __bad_area_nosemaphore(regs, error_code, address, si_code);
}
static noinline void
-bad_area(struct pt_regs *regs, unsigned long writeaccess, unsigned long address)
+bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
- __bad_area(regs, writeaccess, address, SEGV_MAPERR);
+ __bad_area(regs, error_code, address, SEGV_MAPERR);
}
static noinline void
-bad_area_access_error(struct pt_regs *regs, unsigned long writeaccess,
+bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
- __bad_area(regs, writeaccess, address, SEGV_ACCERR);
+ __bad_area(regs, error_code, address, SEGV_ACCERR);
}
static void out_of_memory(void)
}
static void
-do_sigbus(struct pt_regs *regs, unsigned long writeaccess, unsigned long address)
+do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
/* Kernel mode? Handle exceptions or die: */
if (!user_mode(regs))
- no_context(regs, writeaccess, address);
+ no_context(regs, error_code, address);
force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}
static noinline int
-mm_fault_error(struct pt_regs *regs, unsigned long writeaccess,
+mm_fault_error(struct pt_regs *regs, unsigned long error_code,
unsigned long address, unsigned int fault)
{
/*
if (!(fault & VM_FAULT_RETRY))
up_read(&current->mm->mmap_sem);
if (!user_mode(regs))
- no_context(regs, writeaccess, address);
+ no_context(regs, error_code, address);
return 1;
}
/* Kernel mode? Handle exceptions or die: */
if (!user_mode(regs)) {
up_read(&current->mm->mmap_sem);
- no_context(regs, writeaccess, address);
+ no_context(regs, error_code, address);
return 1;
}
out_of_memory();
} else {
if (fault & VM_FAULT_SIGBUS)
- do_sigbus(regs, writeaccess, address);
+ do_sigbus(regs, error_code, address);
else
BUG();
}
* routines.
*/
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
- unsigned long writeaccess,
+ unsigned long error_code,
unsigned long address)
{
unsigned long vec;
struct mm_struct *mm;
struct vm_area_struct * vma;
int fault;
+ int write = error_code & FAULT_CODE_WRITE;
unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
- (writeaccess ? FAULT_FLAG_WRITE : 0));
+ (write ? FAULT_FLAG_WRITE : 0));
tsk = current;
mm = tsk->mm;
if (notify_page_fault(regs, vec))
return;
- bad_area_nosemaphore(regs, writeaccess, address);
+ bad_area_nosemaphore(regs, error_code, address);
return;
}
* in an atomic region then we must not take the fault:
*/
if (unlikely(in_atomic() || !mm)) {
- bad_area_nosemaphore(regs, writeaccess, address);
+ bad_area_nosemaphore(regs, error_code, address);
return;
}
vma = find_vma(mm, address);
if (unlikely(!vma)) {
- bad_area(regs, writeaccess, address);
+ bad_area(regs, error_code, address);
return;
}
if (likely(vma->vm_start <= address))
goto good_area;
if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
- bad_area(regs, writeaccess, address);
+ bad_area(regs, error_code, address);
return;
}
if (unlikely(expand_stack(vma, address))) {
- bad_area(regs, writeaccess, address);
+ bad_area(regs, error_code, address);
return;
}
* we can handle it..
*/
good_area:
- if (unlikely(access_error(writeaccess, vma))) {
- bad_area_access_error(regs, writeaccess, address);
+ if (unlikely(access_error(error_code, vma))) {
+ bad_area_access_error(regs, error_code, address);
return;
}
+ set_thread_fault_code(error_code);
+
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
- if (mm_fault_error(regs, writeaccess, address, fault))
+ if (mm_fault_error(regs, error_code, address, fault))
return;
if (flags & FAULT_FLAG_ALLOW_RETRY) {
* Called with interrupts disabled.
*/
asmlinkage int __kprobes
-handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
+handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
pgd_t *pgd;
entry = *pte;
if (unlikely(pte_none(entry) || pte_not_present(entry)))
return 1;
- if (unlikely(writeaccess && !pte_write(entry)))
+ if (unlikely(error_code && !pte_write(entry)))
return 1;
- if (writeaccess)
+ if (error_code)
entry = pte_mkdirty(entry);
entry = pte_mkyoung(entry);
* the case of an initial page write exception, so we need to
* flush it in order to avoid potential TLB entry duplication.
*/
- if (writeaccess == 2)
+ if (error_code == FAULT_CODE_INITIAL)
local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif
+ set_thread_fault_code(error_code);
update_mmu_cache(NULL, address, pte);
return 0;