arm64: mm: enable per pmd page table lock
author	Yu Zhao <yuzhao@google.com>
Tue, 12 Mar 2019 00:57:49 +0000 (18:57 -0600)
committer	Will Deacon <will.deacon@arm.com>
Tue, 9 Apr 2019 10:21:50 +0000 (11:21 +0100)
Switch from a per-mm_struct to a per-pmd page table lock by enabling
ARCH_ENABLE_SPLIT_PMD_PTLOCK. This provides finer-grained locking on
large systems.
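
For context, generic mm code takes this lock through pmd_lock(). A rough
sketch of the helpers involved (simplified from include/linux/mm.h; the
real code splits the two cases across #if blocks):

	static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
	{
	#if USE_SPLIT_PMD_PTLOCKS
		/* The lock lives with the pmd table's struct page. */
		return ptlock_ptr(pmd_to_page(pmd));
	#else
		/* A single per-mm lock shared by all pmd tables. */
		return &mm->page_table_lock;
	#endif
	}

	static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
	{
		spinlock_t *ptl = pmd_lockptr(mm, pmd);

		spin_lock(ptl);
		return ptl;
	}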

I'm not sure how much contention there is on mm->page_table_lock in
practice, but since the option comes at essentially no cost (apart from
initializing more spinlocks), there is little reason not to enable it
now.

We only do so when the pmd is not folded, so that we don't mistakenly
call pgtable_pmd_page_ctor() on a pud or p4d in pgd_pgtable_alloc().
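
For reference, when the pmd is folded (PGTABLE_LEVELS == 2, e.g. 64K
pages with a 42-bit VA), the generic stubs in
include/asm-generic/pgtable-nopmd.h make pmd allocation a no-op, so
there is no pmd table page for a ctor/dtor pair to manage; roughly:

	/* The one-entry pmd sits inside the pud: nothing to allocate. */
	#define pmd_alloc_one(mm, address)	NULL

	static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { }

arm64 defines its own pmd_alloc_one()/pmd_free() (modified below) only
when CONFIG_PGTABLE_LEVELS > 2, which is what makes the Kconfig guard
sufficient.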

Signed-off-by: Yu Zhao <yuzhao@google.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
arch/arm64/Kconfig
arch/arm64/include/asm/pgalloc.h
arch/arm64/include/asm/tlb.h

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7e34b9eba5de151572ba479d73ccf82ba18e8beb..555af50355920da5af0c5a1c9653ab110bb1f33c 100644
@@ -889,6 +889,9 @@ config ARCH_WANT_HUGE_PMD_SHARE
 config ARCH_HAS_CACHE_LINE_SIZE
        def_bool y
 
+config ARCH_ENABLE_SPLIT_PMD_PTLOCK
+       def_bool y if PGTABLE_LEVELS > 2
+
 config SECCOMP
        bool "Enable seccomp to safely compute untrusted bytecode"
        ---help---
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 52fa47c73bf042efc0c84796dcf1a21c10c534c1..dabba4b2c61f17fa9898777b12c549d07517fc77 100644
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return (pmd_t *)__get_free_page(PGALLOC_GFP);
+       struct page *page;
+
+       page = alloc_page(PGALLOC_GFP);
+       if (!page)
+               return NULL;
+       if (!pgtable_pmd_page_ctor(page)) {
+               __free_page(page);
+               return NULL;
+       }
+       return page_address(page);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
 {
        BUG_ON((unsigned long)pmdp & (PAGE_SIZE-1));
+       pgtable_pmd_page_dtor(virt_to_page(pmdp));
        free_page((unsigned long)pmdp);
 }
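
The ctor/dtor pair manages the split lock's lifetime, which is why the
error path above must free the page when the ctor fails: in some
configurations the spinlock does not fit in struct page and has to be
allocated separately. Roughly (simplified from include/linux/mm.h):

	static inline bool pgtable_pmd_page_ctor(struct page *page)
	{
	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		page->pmd_huge_pte = NULL;
	#endif
		/* May fail when the lock is allocated separately. */
		return ptlock_init(page);
	}

	static inline void pgtable_pmd_page_dtor(struct page *page)
	{
	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
	#endif
		ptlock_free(page);
	}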
 
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 106fdc951b6eefdda0a97c877c2493b7bdfac1f8..4e3becfed38776f5a105ce144080c73fb00a268d 100644
@@ -62,7 +62,10 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
                                  unsigned long addr)
 {
-       tlb_remove_table(tlb, virt_to_page(pmdp));
+       struct page *page = virt_to_page(pmdp);
+
+       pgtable_pmd_page_dtor(page);
+       tlb_remove_table(tlb, page);
 }
 #endif
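
Note the ordering in __pmd_free_tlb(): the dtor runs before
tlb_remove_table() queues the page for deferred freeing, mirroring
pmd_free() above, so the split lock is torn down while this CPU still
owns the table page.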