From 1592eef01505177ed50149795a1560ec5a139df1 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Mon, 7 Apr 2014 15:37:22 -0700
Subject: [PATCH] mm: add debugfs tunable for fault_around_order

Let's allow people to tweak faultaround at runtime.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Kirill A. Shutemov
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Rik van Riel
Cc: Andi Kleen
Cc: Matthew Wilcox
Cc: Dave Hansen
Cc: Alexander Viro
Cc: Dave Chinner
Cc: Ning Qu
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c | 68 +++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 61 insertions(+), 7 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 4eefb7e31521..1b88da5c08b3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -60,6 +60,7 @@
 #include <linux/migrate.h>
 #include <linux/string.h>
 #include <linux/dma-debug.h>
+#include <linux/debugfs.h>
 
 #include <asm/io.h>
 #include <asm/pgalloc.h>
@@ -3382,8 +3383,63 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 }
 
 #define FAULT_AROUND_ORDER 4
-#define FAULT_AROUND_PAGES (1UL << FAULT_AROUND_ORDER)
-#define FAULT_AROUND_MASK ~((1UL << (PAGE_SHIFT + FAULT_AROUND_ORDER)) - 1)
+
+#ifdef CONFIG_DEBUG_FS
+static unsigned int fault_around_order = FAULT_AROUND_ORDER;
+
+static int fault_around_order_get(void *data, u64 *val)
+{
+	*val = fault_around_order;
+	return 0;
+}
+
+static int fault_around_order_set(void *data, u64 val)
+{
+	BUILD_BUG_ON((1UL << FAULT_AROUND_ORDER) > PTRS_PER_PTE);
+	if (1UL << val > PTRS_PER_PTE)
+		return -EINVAL;
+	fault_around_order = val;
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fault_around_order_fops,
+		fault_around_order_get, fault_around_order_set, "%llu\n");
+
+static int __init fault_around_debugfs(void)
+{
+	void *ret;
+
+	ret = debugfs_create_file("fault_around_order", 0644, NULL, NULL,
+			&fault_around_order_fops);
+	if (!ret)
+		pr_warn("Failed to create fault_around_order in debugfs");
+	return 0;
+}
+late_initcall(fault_around_debugfs);
+
+static inline unsigned long fault_around_pages(void)
+{
+	return 1UL << fault_around_order;
+}
+
+static inline unsigned long fault_around_mask(void)
+{
+	return ~((1UL << (PAGE_SHIFT + fault_around_order)) - 1);
+}
+#else
+static inline unsigned long fault_around_pages(void)
+{
+	unsigned long nr_pages;
+
+	nr_pages = 1UL << FAULT_AROUND_ORDER;
+	BUILD_BUG_ON(nr_pages > PTRS_PER_PTE);
+	return nr_pages;
+}
+
+static inline unsigned long fault_around_mask(void)
+{
+	return ~((1UL << (PAGE_SHIFT + FAULT_AROUND_ORDER)) - 1);
+}
+#endif
 
 static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
 		pte_t *pte, pgoff_t pgoff, unsigned int flags)
@@ -3393,21 +3449,19 @@ static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
 	struct vm_fault vmf;
 	int off;
 
-	BUILD_BUG_ON(FAULT_AROUND_PAGES > PTRS_PER_PTE);
-
-	start_addr = max(address & FAULT_AROUND_MASK, vma->vm_start);
+	start_addr = max(address & fault_around_mask(), vma->vm_start);
 	off = ((address - start_addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 	pte -= off;
 	pgoff -= off;
 
 	/*
 	 * max_pgoff is either end of page table or end of vma
-	 * or FAULT_AROUND_PAGES from pgoff, depending what is neast.
+	 * or fault_around_pages() from pgoff, depending what is neast.
 	 */
 	max_pgoff = pgoff - ((start_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
 		PTRS_PER_PTE - 1;
 	max_pgoff = min3(max_pgoff, vma_pages(vma) + vma->vm_pgoff - 1,
-			pgoff + FAULT_AROUND_PAGES - 1);
+			pgoff + fault_around_pages() - 1);
 
 	/* Check if it makes any sense to call ->map_pages */
 	while (!pte_none(*pte)) {
-- 
2.30.2
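
A minimal userspace sketch of how the new knob might be exercised at runtime (not part of the patch itself). It assumes debugfs is mounted at the conventional /sys/kernel/debug; because the file is created with a NULL parent it sits at the debugfs root, and fault_around_order_set() rejects any value where 1UL << val exceeds PTRS_PER_PTE. The file name fault_around_knob.c is hypothetical.

/* fault_around_knob.c - hypothetical helper; needs root and a mounted debugfs */
#include <stdio.h>

static const char *knob = "/sys/kernel/debug/fault_around_order";

int main(int argc, char **argv)
{
	FILE *f;
	unsigned long long order;

	/* Read and report the current fault-around order. */
	f = fopen(knob, "r");
	if (!f) {
		perror(knob);
		return 1;
	}
	if (fscanf(f, "%llu", &order) == 1)
		printf("fault_around_order = %llu\n", order);
	fclose(f);

	/* Optionally set a new order, e.g. "./fault_around_knob 5".
	 * The kernel returns -EINVAL if 1UL << val > PTRS_PER_PTE. */
	if (argc > 1) {
		f = fopen(knob, "w");
		if (!f) {
			perror(knob);
			return 1;
		}
		fprintf(f, "%s\n", argv[1]);
		fclose(f);
	}
	return 0;
}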