From: Felix Fietkau
Date: Sun, 24 Jan 2016 00:16:27 +0000 (+0000)
Subject: kernel: fix MIPS highmem breakage
X-Git-Url: http://git.cdn.openwrt.org/?a=commitdiff_plain;h=e756c2bb074207857bb7780cfd31a04eda745635;p=openwrt%2Fstaging%2Fneocturne.git

kernel: fix MIPS highmem breakage

Signed-off-by: Felix Fietkau

SVN-Revision: 48468
---

diff --git a/target/linux/generic/patches-4.3/101-MIPS-fix-cache-flushing-for-highmem-pages.patch b/target/linux/generic/patches-4.3/101-MIPS-fix-cache-flushing-for-highmem-pages.patch
new file mode 100644
index 0000000000..d7220af9af
--- /dev/null
+++ b/target/linux/generic/patches-4.3/101-MIPS-fix-cache-flushing-for-highmem-pages.patch
@@ -0,0 +1,95 @@
+From: Felix Fietkau
+Date: Sun, 24 Jan 2016 01:03:51 +0100
+Subject: [PATCH] MIPS: fix cache flushing for highmem pages
+
+Most cache flush ops were no-op for highmem pages. This led to nasty
+segfaults and (in the case of page_address(page) == NULL) kernel
+crashes.
+
+Fix this by always flushing highmem pages using kmap/kunmap_atomic
+around the actual cache flush. This might be a bit inefficient, but at
+least it's stable.
+
+Signed-off-by: Felix Fietkau
+---
+
+--- a/arch/mips/mm/cache.c
++++ b/arch/mips/mm/cache.c
+@@ -14,6 +14,7 @@
+ #include
+ #include
+ #include
++#include <linux/highmem.h>
+ 
+ #include
+ #include
+@@ -78,18 +79,29 @@ SYSCALL_DEFINE3(cacheflush, unsigned lon
+ 	return 0;
+ }
+ 
++static void
++flush_highmem_page(struct page *page)
++{
++	void *addr = kmap_atomic(page);
++	flush_data_cache_page((unsigned long)addr);
++	kunmap_atomic(addr);
++}
++
+ void __flush_dcache_page(struct page *page)
+ {
+ 	struct address_space *mapping = page_mapping(page);
+ 	unsigned long addr;
+ 
+-	if (PageHighMem(page))
+-		return;
+ 	if (mapping && !mapping_mapped(mapping)) {
+ 		SetPageDcacheDirty(page);
+ 		return;
+ 	}
+ 
++	if (PageHighMem(page)) {
++		flush_highmem_page(page);
++		return;
++	}
++
+ 	/*
+ 	 * We could delay the flush for the !page_mapping case too.  But that
+ 	 * case is for exec env/arg pages and those are %99 certainly going to
+@@ -105,6 +117,11 @@ void __flush_anon_page(struct page *page
+ {
+ 	unsigned long addr = (unsigned long) page_address(page);
+ 
++	if (PageHighMem(page)) {
++		flush_highmem_page(page);
++		return;
++	}
++
+ 	if (pages_do_alias(addr, vmaddr)) {
+ 		if (page_mapped(page) && !Page_dcache_dirty(page)) {
+ 			void *kaddr;
+@@ -123,8 +140,10 @@ void __flush_icache_page(struct vm_area_
+ {
+ 	unsigned long addr;
+ 
+-	if (PageHighMem(page))
++	if (PageHighMem(page)) {
++		flush_highmem_page(page);
+ 		return;
++	}
+ 
+ 	addr = (unsigned long) page_address(page);
+ 	flush_data_cache_page(addr);
+@@ -142,7 +161,12 @@ void __update_cache(struct vm_area_struc
+ 	if (unlikely(!pfn_valid(pfn)))
+ 		return;
+ 	page = pfn_to_page(pfn);
+-	if (page_mapping(page) && Page_dcache_dirty(page)) {
++	if (!Page_dcache_dirty(page))
++		return;
++
++	if (PageHighMem(page)) {
++		flush_highmem_page(page);
++	} else if (page_mapping(page)) {
+ 		addr = (unsigned long) page_address(page);
+ 		if (exec || pages_do_alias(addr, address & PAGE_MASK))
+ 			flush_data_cache_page(addr);
diff --git a/target/linux/generic/patches-4.4/101-MIPS-fix-cache-flushing-for-highmem-pages.patch b/target/linux/generic/patches-4.4/101-MIPS-fix-cache-flushing-for-highmem-pages.patch
new file mode 100644
index 0000000000..d7220af9af
--- /dev/null
+++ b/target/linux/generic/patches-4.4/101-MIPS-fix-cache-flushing-for-highmem-pages.patch
@@ -0,0 +1,95 @@
+From: Felix Fietkau
+Date: Sun, 24 Jan 2016 01:03:51 +0100
+Subject: [PATCH] MIPS: fix cache flushing for highmem pages
+
+Most cache flush ops were no-op for highmem pages. This led to nasty
+segfaults and (in the case of page_address(page) == NULL) kernel
+crashes.
+
+Fix this by always flushing highmem pages using kmap/kunmap_atomic
+around the actual cache flush. This might be a bit inefficient, but at
+least it's stable.
+
+Signed-off-by: Felix Fietkau
+---
+
+--- a/arch/mips/mm/cache.c
++++ b/arch/mips/mm/cache.c
+@@ -14,6 +14,7 @@
+ #include
+ #include
+ #include
++#include <linux/highmem.h>
+ 
+ #include
+ #include
+@@ -78,18 +79,29 @@ SYSCALL_DEFINE3(cacheflush, unsigned lon
+ 	return 0;
+ }
+ 
++static void
++flush_highmem_page(struct page *page)
++{
++	void *addr = kmap_atomic(page);
++	flush_data_cache_page((unsigned long)addr);
++	kunmap_atomic(addr);
++}
++
+ void __flush_dcache_page(struct page *page)
+ {
+ 	struct address_space *mapping = page_mapping(page);
+ 	unsigned long addr;
+ 
+-	if (PageHighMem(page))
+-		return;
+ 	if (mapping && !mapping_mapped(mapping)) {
+ 		SetPageDcacheDirty(page);
+ 		return;
+ 	}
+ 
++	if (PageHighMem(page)) {
++		flush_highmem_page(page);
++		return;
++	}
++
+ 	/*
+ 	 * We could delay the flush for the !page_mapping case too.  But that
+ 	 * case is for exec env/arg pages and those are %99 certainly going to
+@@ -105,6 +117,11 @@ void __flush_anon_page(struct page *page
+ {
+ 	unsigned long addr = (unsigned long) page_address(page);
+ 
++	if (PageHighMem(page)) {
++		flush_highmem_page(page);
++		return;
++	}
++
+ 	if (pages_do_alias(addr, vmaddr)) {
+ 		if (page_mapped(page) && !Page_dcache_dirty(page)) {
+ 			void *kaddr;
+@@ -123,8 +140,10 @@ void __flush_icache_page(struct vm_area_
+ {
+ 	unsigned long addr;
+ 
+-	if (PageHighMem(page))
++	if (PageHighMem(page)) {
++		flush_highmem_page(page);
+ 		return;
++	}
+ 
+ 	addr = (unsigned long) page_address(page);
+ 	flush_data_cache_page(addr);
+@@ -142,7 +161,12 @@ void __update_cache(struct vm_area_struc
+ 	if (unlikely(!pfn_valid(pfn)))
+ 		return;
+ 	page = pfn_to_page(pfn);
+-	if (page_mapping(page) && Page_dcache_dirty(page)) {
++	if (!Page_dcache_dirty(page))
++		return;
++
++	if (PageHighMem(page)) {
++		flush_highmem_page(page);
++	} else if (page_mapping(page)) {
+ 		addr = (unsigned long) page_address(page);
+ 		if (exec || pages_do_alias(addr, address & PAGE_MASK))
+ 			flush_data_cache_page(addr);
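
The core pattern both copies of the patch rely on is the kmap_atomic()/kunmap_atomic() bracket
around the per-page cache flush, as in flush_highmem_page() above: map the page into a temporary
kernel virtual address, flush through that address, then drop the mapping. A minimal standalone
sketch of that pattern follows; the helper name flush_page_via_kmap and the do_flush callback are
illustrative assumptions, not part of the patch (the patch calls the MIPS flush_data_cache_page()
hook directly).

	#include <linux/highmem.h>	/* kmap_atomic(), kunmap_atomic() */
	#include <linux/mm_types.h>	/* struct page */

	/*
	 * Flush one page through a temporary kernel mapping.  kmap_atomic()
	 * returns the linear-map address for lowmem pages and a short-lived
	 * fixmap slot for highmem pages, so the same code handles both;
	 * sleeping is not allowed between the map and the unmap.
	 */
	static void flush_page_via_kmap(struct page *page,
					void (*do_flush)(unsigned long addr))
	{
		void *addr = kmap_atomic(page);

		do_flush((unsigned long)addr);
		kunmap_atomic(addr);
	}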