From 5a1dc78a38bfb04159a08cd493e5b3d844939e6c Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Mon, 14 May 2012 14:57:28 +0900
Subject: [PATCH] sh: Support thread fault code encoding.

This provides a simple interface modelled after sparc64/m32r to encode
the error code in the upper byte of thread_info for finer-grained
handling in the page fault path.

Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/thread_info.h | 46 ++++++++++++++++-----
 arch/sh/kernel/cpu/sh3/entry.S    | 11 ++---
 arch/sh/mm/fault_32.c             | 68 ++++++++++++++++---------
 3 files changed, 78 insertions(+), 47 deletions(-)

diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 20ee40af16e9..25a13e534ffe 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -10,8 +10,18 @@
  * - Incorporating suggestions made by Linus Torvalds and Dave Miller
  */
 #ifdef __KERNEL__
+
 #include <asm/page.h>
 
+/*
+ * Page fault error code bits
+ */
+#define FAULT_CODE_WRITE	(1 << 0)	/* write access */
+#define FAULT_CODE_INITIAL	(1 << 1)	/* initial page write */
+#define FAULT_CODE_ITLB		(1 << 2)	/* ITLB miss */
+#define FAULT_CODE_PROT		(1 << 3)	/* protection fault */
+#define FAULT_CODE_USER		(1 << 4)	/* user-mode access */
+
 #ifndef __ASSEMBLY__
 #include <asm/processor.h>
 
@@ -107,10 +117,13 @@ extern void init_thread_xstate(void);
 #endif /* __ASSEMBLY__ */
 
 /*
- * thread information flags
- * - these are process state flags that various assembly files may need to access
- * - pending work-to-be-done flags are in LSW
- * - other flags in MSW
+ * Thread information flags
+ *
+ * - Limited to 24 bits, upper byte used for fault code encoding.
+ *
+ * - _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or
+ *   we blow the tst immediate size constraints and need to fix up
+ *   arch/sh/kernel/entry-common.S.
  */
 #define TIF_SYSCALL_TRACE	0	/* syscall trace active */
 #define TIF_SIGPENDING		1	/* signal pending */
@@ -133,12 +146,6 @@ extern void init_thread_xstate(void);
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 
-/*
- * _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or we
- * blow the tst immediate size constraints and need to fix up
- * arch/sh/kernel/entry-common.S.
- */
-
 /* work to do in syscall trace */
 #define _TIF_WORK_SYSCALL_MASK	(_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
 				 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP    | \
@@ -165,6 +172,7 @@ extern void init_thread_xstate(void);
 #define TS_USEDFPU		0x0002	/* FPU used by this task this quantum */
 
 #ifndef __ASSEMBLY__
+
 #define HAVE_SET_RESTORE_SIGMASK	1
 static inline void set_restore_sigmask(void)
 {
@@ -172,6 +180,24 @@ static inline void set_restore_sigmask(void)
 	ti->status |= TS_RESTORE_SIGMASK;
 	set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
 }
+
+#define TI_FLAG_FAULT_CODE_SHIFT	24
+
+/*
+ * Additional thread flag encoding
+ */
+static inline void set_thread_fault_code(unsigned int val)
+{
+	struct thread_info *ti = current_thread_info();
+	ti->flags = (ti->flags & (~0U >> (32 - TI_FLAG_FAULT_CODE_SHIFT)))
+		| (val << TI_FLAG_FAULT_CODE_SHIFT);
+}
+
+static inline unsigned int get_thread_fault_code(void)
+{
+	struct thread_info *ti = current_thread_info();
+	return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
+}
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index f6a389c996cb..262db6ec067b 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -2,7 +2,7 @@
  * arch/sh/kernel/cpu/sh3/entry.S
  *
  * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
- * Copyright (C) 2003 - 2006  Paul Mundt
+ * Copyright (C) 2003 - 2012  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include <asm/thread_info.h>
 
 ! NOTE:
 ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
@@ -114,22 +115,22 @@ ENTRY(tlb_miss_load)
 	.align	2
 ENTRY(tlb_miss_store)
 	bra	call_handle_tlbmiss
-	 mov	#1, r5
+	 mov	#FAULT_CODE_WRITE, r5
 
 	.align	2
 ENTRY(initial_page_write)
 	bra	call_handle_tlbmiss
-	 mov	#2, r5
+	 mov	#FAULT_CODE_INITIAL, r5
 
 	.align	2
 ENTRY(tlb_protection_violation_load)
 	bra	call_do_page_fault
-	 mov	#0, r5
+	 mov	#FAULT_CODE_PROT, r5
 
 	.align	2
 ENTRY(tlb_protection_violation_store)
 	bra	call_do_page_fault
-	 mov	#1, r5
+	 mov	#(FAULT_CODE_PROT | FAULT_CODE_WRITE), r5
 
 call_handle_tlbmiss:
 	mov.l	1f, r0
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 889e83b5ff22..a469b95e88fb 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -211,7 +211,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long address)
 }
 
 static noinline void
-no_context(struct pt_regs *regs, unsigned long writeaccess,
+no_context(struct pt_regs *regs, unsigned long error_code,
 	   unsigned long address)
 {
 	/* Are we prepared to handle this kernel fault? */
@@ -229,13 +229,13 @@ no_context(struct pt_regs *regs, unsigned long writeaccess,
 
 	show_fault_oops(regs, address);
 
-	die("Oops", regs, writeaccess);
+	die("Oops", regs, error_code);
 	bust_spinlocks(0);
 	do_exit(SIGKILL);
 }
 
 static void
-__bad_area_nosemaphore(struct pt_regs *regs, unsigned long writeaccess,
+__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 		       unsigned long address, int si_code)
 {
 	struct task_struct *tsk = current;
@@ -252,18 +252,18 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long writeaccess,
 		return;
 	}
 
-	no_context(regs, writeaccess, address);
+	no_context(regs, error_code, address);
 }
 
 static noinline void
-bad_area_nosemaphore(struct pt_regs *regs, unsigned long writeaccess,
+bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 		     unsigned long address)
 {
-	__bad_area_nosemaphore(regs, writeaccess, address, SEGV_MAPERR);
+	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
 }
 
 static void
-__bad_area(struct pt_regs *regs, unsigned long writeaccess,
+__bad_area(struct pt_regs *regs, unsigned long error_code,
 	   unsigned long address, int si_code)
 {
 	struct mm_struct *mm = current->mm;
@@ -274,20 +274,20 @@ __bad_area(struct pt_regs *regs, unsigned long writeaccess,
 	 */
 	up_read(&mm->mmap_sem);
 
-	__bad_area_nosemaphore(regs, writeaccess, address, si_code);
+	__bad_area_nosemaphore(regs, error_code, address, si_code);
 }
 
 static noinline void
-bad_area(struct pt_regs *regs, unsigned long writeaccess, unsigned long address)
+bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 {
-	__bad_area(regs, writeaccess, address, SEGV_MAPERR);
+	__bad_area(regs, error_code, address, SEGV_MAPERR);
 }
 
 static noinline void
-bad_area_access_error(struct pt_regs *regs, unsigned long writeaccess,
+bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
 		      unsigned long address)
 {
-	__bad_area(regs, writeaccess, address, SEGV_ACCERR);
+	__bad_area(regs, error_code, address, SEGV_ACCERR);
 }
 
 static void out_of_memory(void)
@@ -302,7 +302,7 @@ static void out_of_memory(void)
 }
 
 static void
-do_sigbus(struct pt_regs *regs, unsigned long writeaccess, unsigned long address)
+do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 {
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
@@ -311,13 +311,13 @@ do_sigbus(struct pt_regs *regs, unsigned long writeaccess, unsigned long address
 
 	/* Kernel mode? Handle exceptions or die: */
 	if (!user_mode(regs))
-		no_context(regs, writeaccess, address);
+		no_context(regs, error_code, address);
 
 	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
 }
 
 static noinline int
-mm_fault_error(struct pt_regs *regs, unsigned long writeaccess,
+mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 	       unsigned long address, unsigned int fault)
 {
 	/*
@@ -328,7 +328,7 @@ mm_fault_error(struct pt_regs *regs, unsigned long writeaccess,
 		if (!(fault & VM_FAULT_RETRY))
 			up_read(&current->mm->mmap_sem);
 		if (!user_mode(regs))
-			no_context(regs, writeaccess, address);
+			no_context(regs, error_code, address);
 		return 1;
 	}
 
@@ -339,14 +339,14 @@ mm_fault_error(struct pt_regs *regs, unsigned long writeaccess,
 		/* Kernel mode? Handle exceptions or die: */
 		if (!user_mode(regs)) {
 			up_read(&current->mm->mmap_sem);
-			no_context(regs, writeaccess, address);
+			no_context(regs, error_code, address);
 			return 1;
 		}
 
 		out_of_memory();
 	} else {
 		if (fault & VM_FAULT_SIGBUS)
-			do_sigbus(regs, writeaccess, address);
+			do_sigbus(regs, error_code, address);
 		else
 			BUG();
 	}
@@ -381,7 +381,7 @@ static int fault_in_kernel_space(unsigned long address)
  * routines.
  */
 asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
-					unsigned long writeaccess,
+					unsigned long error_code,
 					unsigned long address)
 {
 	unsigned long vec;
@@ -389,8 +389,9 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	int fault;
+	int write = error_code & FAULT_CODE_WRITE;
 	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-			      (writeaccess ? FAULT_FLAG_WRITE : 0));
+			      (write ? FAULT_FLAG_WRITE : 0));
 
 	tsk = current;
 	mm = tsk->mm;
@@ -411,7 +412,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 		if (notify_page_fault(regs, vec))
 			return;
 
-		bad_area_nosemaphore(regs, writeaccess, address);
+		bad_area_nosemaphore(regs, error_code, address);
 		return;
 	}
 
@@ -429,7 +430,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	 * in an atomic region then we must not take the fault:
 	 */
 	if (unlikely(in_atomic() || !mm)) {
-		bad_area_nosemaphore(regs, writeaccess, address);
+		bad_area_nosemaphore(regs, error_code, address);
 		return;
 	}
 
@@ -438,17 +439,17 @@ retry:
 
 	vma = find_vma(mm, address);
 	if (unlikely(!vma)) {
-		bad_area(regs, writeaccess, address);
+		bad_area(regs, error_code, address);
 		return;
 	}
 	if (likely(vma->vm_start <= address))
 		goto good_area;
 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-		bad_area(regs, writeaccess, address);
+		bad_area(regs, error_code, address);
 		return;
 	}
 	if (unlikely(expand_stack(vma, address))) {
-		bad_area(regs, writeaccess, address);
+		bad_area(regs, error_code, address);
 		return;
 	}
 
@@ -457,11 +458,13 @@ retry:
 	 * we can handle it..
 	 */
 good_area:
-	if (unlikely(access_error(writeaccess, vma))) {
-		bad_area_access_error(regs, writeaccess, address);
+	if (unlikely(access_error(error_code, vma))) {
+		bad_area_access_error(regs, error_code, address);
 		return;
 	}
 
+	set_thread_fault_code(error_code);
+
 	/*
 	 * If for any reason at all we couldn't handle the fault,
 	 * make sure we exit gracefully rather than endlessly redo
@@ -470,7 +473,7 @@ good_area:
 	fault = handle_mm_fault(mm, vma, address, flags);
 
 	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
-		if (mm_fault_error(regs, writeaccess, address, fault))
+		if (mm_fault_error(regs, error_code, address, fault))
 			return;
 
 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
@@ -502,7 +505,7 @@ good_area:
  * Called with interrupts disabled.
  */
 asmlinkage int __kprobes
-handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
+handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
 	       unsigned long address)
 {
 	pgd_t *pgd;
@@ -535,10 +538,10 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
 	entry = *pte;
 	if (unlikely(pte_none(entry) || pte_not_present(entry)))
 		return 1;
-	if (unlikely(writeaccess && !pte_write(entry)))
+	if (unlikely(error_code && !pte_write(entry)))
 		return 1;
 
-	if (writeaccess)
+	if (error_code)
 		entry = pte_mkdirty(entry);
 	entry = pte_mkyoung(entry);
 
@@ -550,10 +553,11 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
 	 * the case of an initial page write exception, so we need to
 	 * flush it in order to avoid potential TLB entry duplication.
 	 */
-	if (writeaccess == 2)
+	if (error_code == FAULT_CODE_INITIAL)
 		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
 #endif
 
+	set_thread_fault_code(error_code);
 	update_mmu_cache(NULL, address, pte);
 
 	return 0;
-- 
2.30.2
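
For illustration only, not part of the patch: a minimal, self-contained C sketch of the encoding introduced above. The TIF_* work flags stay in the low 24 bits of the flags word while the FAULT_CODE_* value occupies the upper byte, mirroring set_thread_fault_code()/get_thread_fault_code(). The standalone file, the bare `flags` variable and the pretend TIF bit are made up for the demo and are not kernel code.

/* fault_code_demo.c - standalone model of the thread_info flag packing.
 * Build and run: cc -Wall -o fault_code_demo fault_code_demo.c && ./fault_code_demo
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FAULT_CODE_WRITE	(1 << 0)	/* write access */
#define FAULT_CODE_INITIAL	(1 << 1)	/* initial page write */
#define FAULT_CODE_ITLB		(1 << 2)	/* ITLB miss */
#define FAULT_CODE_PROT		(1 << 3)	/* protection fault */

#define TI_FLAG_FAULT_CODE_SHIFT	24

static uint32_t flags;		/* stands in for thread_info->flags */

/* Keep the low 24 TIF bits, replace the fault code in the upper byte. */
static void set_thread_fault_code(unsigned int val)
{
	flags = (flags & (~0U >> (32 - TI_FLAG_FAULT_CODE_SHIFT)))
		| ((uint32_t)val << TI_FLAG_FAULT_CODE_SHIFT);
}

static unsigned int get_thread_fault_code(void)
{
	return flags >> TI_FLAG_FAULT_CODE_SHIFT;
}

int main(void)
{
	flags = 1 << 1;		/* pretend a low TIF work bit is already set */

	set_thread_fault_code(FAULT_CODE_PROT | FAULT_CODE_WRITE);
	assert(get_thread_fault_code() == (FAULT_CODE_PROT | FAULT_CODE_WRITE));
	assert(flags & (1 << 1));	/* the low TIF bit survives */

	set_thread_fault_code(FAULT_CODE_ITLB);	/* a later fault replaces the code */
	assert(get_thread_fault_code() == FAULT_CODE_ITLB);

	printf("flags = %#010x, fault code = %#x\n",
	       (unsigned int)flags, get_thread_fault_code());
	return 0;
}

Keeping the fault code in the upper byte means _TIF_WORK_MASK and _TIF_ALLWORK_MASK still fit the tst immediate size constraints noted in the header comment, so the existing fast paths in arch/sh/kernel/entry-common.S are untouched.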