From 981bbf274b64496fd4376b44120d47dae4ca94e8 Mon Sep 17 00:00:00 2001
From: Guo Ren
Date: Wed, 10 Apr 2019 10:55:07 +0800
Subject: [PATCH] csky: Fixup wrong update_mmu_cache implementation

In our stress tests, we found crashes caused by the early return:

	if (!(vma->vm_flags & VM_EXEC))
		return;

in update_mmu_cache().

The current update_mmu_cache() implementation is wrong, so revert to
the conservative implementation.

Also, handling the page's virtual address in update_mmu_cache() is
risky: the task may be scheduled out and the page's virtual mapping may
change, so we must rely on the preempt_disable() & pagefault_disable()
that kmap_atomic() calls.

Signed-off-by: Guo Ren
Cc: Arnd Bergmann
---
 arch/csky/abiv2/cacheflush.c | 13 ++-----------
 1 file changed, 2 insertions(+), 11 deletions(-)

diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c
index d22c95ffc74d..5bb887b275e1 100644
--- a/arch/csky/abiv2/cacheflush.c
+++ b/arch/csky/abiv2/cacheflush.c
@@ -34,10 +34,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 {
 	unsigned long addr, pfn;
 	struct page *page;
-	void *va;
-
-	if (!(vma->vm_flags & VM_EXEC))
-		return;
 
 	pfn = pte_pfn(*pte);
 	if (unlikely(!pfn_valid(pfn)))
@@ -47,14 +43,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 	if (page == ZERO_PAGE(0))
 		return;
 
-	va = page_address(page);
-	addr = (unsigned long) va;
-
-	if (va == NULL && PageHighMem(page))
-		addr = (unsigned long) kmap_atomic(page);
+	addr = (unsigned long) kmap_atomic(page);
 
 	cache_wbinv_range(addr, addr + PAGE_SIZE);
 
-	if (va == NULL && PageHighMem(page))
-		kunmap_atomic((void *) addr);
+	kunmap_atomic((void *) addr);
 }
-- 
2.30.2
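
Note: for reference, below is a minimal sketch of how update_mmu_cache()
reads after this patch, reconstructed from the hunks above. The name of
the third parameter (pte), the pfn_to_page() lookup elided between the
two hunks, and the header list are assumptions, not taken verbatim from
the diff:

	#include <linux/mm.h>
	#include <linux/highmem.h>
	#include <asm/cacheflush.h>

	void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
			      pte_t *pte)
	{
		unsigned long addr, pfn;
		struct page *page;

		pfn = pte_pfn(*pte);
		if (unlikely(!pfn_valid(pfn)))
			return;

		/* Assumed: the page lookup elided between the two hunks. */
		page = pfn_to_page(pfn);
		if (page == ZERO_PAGE(0))
			return;

		/*
		 * kmap_atomic() disables preemption and page faults, so the
		 * page's kernel virtual mapping cannot change underneath us
		 * while we write back and invalidate the cache lines.
		 */
		addr = (unsigned long) kmap_atomic(page);

		cache_wbinv_range(addr, addr + PAGE_SIZE);

		kunmap_atomic((void *) addr);
	}

Using kmap_atomic() unconditionally also covers the lowmem case: for a
non-highmem page it simply returns page_address(page), while still
providing the preempt/pagefault protection the commit message relies on.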