From 595cd8f256d24face93b2722927ec9c980419c26 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Date: Fri, 24 Feb 2017 14:59:19 -0800
Subject: [PATCH] mm/ksm: handle protnone saved writes when making page write
 protect

Without this KSM will consider the page write protected, but a numa
fault can later mark the page writable.  This can result in memory
corruption.

Link: http://lkml.kernel.org/r/1487498625-10891-3-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 include/asm-generic/pgtable.h | 8 ++++++++
 mm/ksm.c                      | 9 +++++++--
 2 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 6265411eb2ed..f4ca23b158b3 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -233,6 +233,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define pte_mk_savedwrite pte_mkwrite
 #endif
 
+#ifndef pte_clear_savedwrite
+#define pte_clear_savedwrite pte_wrprotect
+#endif
+
 #ifndef pmd_savedwrite
 #define pmd_savedwrite pmd_write
 #endif
@@ -241,6 +245,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define pmd_mk_savedwrite pmd_mkwrite
 #endif
 
+#ifndef pmd_clear_savedwrite
+#define pmd_clear_savedwrite pmd_wrprotect
+#endif
+
 #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
diff --git a/mm/ksm.c b/mm/ksm.c
index 8960f6ecbc12..cf211c01ceac 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -880,7 +880,8 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
 		goto out_unlock;
 
-	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte)) {
+	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
+	    (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte))) {
 		pte_t entry;
 
 		swapped = PageSwapCache(page);
@@ -905,7 +906,11 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 		}
 		if (pte_dirty(entry))
 			set_page_dirty(page);
-		entry = pte_mkclean(pte_wrprotect(entry));
+
+		if (pte_protnone(entry))
+			entry = pte_mkclean(pte_clear_savedwrite(entry));
+		else
+			entry = pte_mkclean(pte_wrprotect(entry));
 		set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
 	}
 	*orig_pte = *pvmw.pte;
-- 
2.30.2