x86/mm: Make __VIRTUAL_MASK_SHIFT dynamic

Author:     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
AuthorDate: Wed, 14 Feb 2018 11:16:55 +0000 (14:16 +0300)
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Wed, 14 Feb 2018 12:11:15 +0000 (13:11 +0100)
For boot-time switching between paging modes, we need to be able to
adjust the virtual mask shift at runtime: __VIRTUAL_MASK_SHIFT is 56
under 5-level paging but 47 under 4-level paging, so with
CONFIG_X86_5LEVEL=y it now evaluates pgtable_l5_enabled instead of
being a compile-time constant.
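
To illustrate what the shift controls, here is a minimal userspace
sketch (not kernel code; is_canonical() is made up for this example)
of the canonicality rule the entry code enforces: all bits above bit
__VIRTUAL_MASK_SHIFT must be copies of that bit.

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * A virtual address is canonical when bits [63:shift] equal
	 * bit `shift`, where shift is __VIRTUAL_MASK_SHIFT: 47 for
	 * 4-level paging, 56 for 5-level paging. Relies on two's
	 * complement and arithmetic right shift, as on x86-64.
	 */
	static bool is_canonical(uint64_t vaddr, unsigned int shift)
	{
		/* Sign-extend from bit `shift`, compare with original. */
		uint64_t ext = (uint64_t)((int64_t)(vaddr << (63 - shift))
					  >> (63 - shift));

		return ext == vaddr;
	}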

The change doesn't affect the kernel image size much:

   text    data     bss     dec     hex filename
8628892 4734340 1368064 14731296  e0c820 vmlinux.before
8628966 4734340 1368064 14731370  e0c86a vmlinux.after

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20180214111656.88514-9-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/entry/entry_64.S
arch/x86/include/asm/page_64_types.h
arch/x86/mm/dump_pagetables.c
arch/x86/mm/kaslr.c

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 30c8c5344c4a5dcfeb96d0711a322e50de33d324..2c06348b780783c28ade42d8543e61ff314406ac 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -274,8 +274,20 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
         * Change top bits to match most significant bit (47th or 56th bit
         * depending on paging mode) in the address.
         */
+#ifdef CONFIG_X86_5LEVEL
+       testl   $1, pgtable_l5_enabled(%rip)
+       jz      1f
+       shl     $(64 - 57), %rcx
+       sar     $(64 - 57), %rcx
+       jmp     2f
+1:
+       shl     $(64 - 48), %rcx
+       sar     $(64 - 48), %rcx
+2:
+#else
        shl     $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
        sar     $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
+#endif
 
        /* If this changed %rcx, it was not canonical */
        cmpq    %rcx, %r11
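
A note on the assembly above: once __VIRTUAL_MASK_SHIFT expands to a C
ternary on pgtable_l5_enabled, it can no longer be used from a .S
file, so the entry code tests pgtable_l5_enabled(%rip) itself and
hardcodes both shift amounts (64 - 57 = 7 for 5-level, 64 - 48 = 16
for 4-level). The shl/sar pair sign-extends the address from bit 56 or
47: under 4-level paging the canonical 0xffff800000000000 survives the
shift pair unchanged, while the non-canonical 0x0000800000000000 comes
back as 0xffff800000000000 and the following cmpq catches the
difference.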
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index d54a3d5b5b3be9835b417dd7e0dc3dc3ca6d14d5..fa7dc7cd8c19cfc475fc9c44f87ad796a5a69ae9 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -56,7 +56,7 @@
 #define __PHYSICAL_MASK_SHIFT  52
 
 #ifdef CONFIG_X86_5LEVEL
-#define __VIRTUAL_MASK_SHIFT   56
+#define __VIRTUAL_MASK_SHIFT   (pgtable_l5_enabled ? 56 : 47)
 #else
 #define __VIRTUAL_MASK_SHIFT   47
 #endif
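
Everything derived from the shift follows along automatically; in
particular, __VIRTUAL_MASK in page_types.h is built as
((1UL << __VIRTUAL_MASK_SHIFT) - 1). A quick userspace demo of the two
resulting masks (illustration only, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long mask47 = (1UL << 47) - 1;	/* 4-level paging */
		unsigned long mask56 = (1UL << 56) - 1;	/* 5-level paging */

		printf("4-level __VIRTUAL_MASK: %#lx\n", mask47); /* 0x7fffffffffff */
		printf("5-level __VIRTUAL_MASK: %#lx\n", mask56); /* 0xffffffffffffff */
		return 0;
	}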
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 420058b05d3927c6af5b295b75b71f9ea39a75b1..9efee6f464abfee04f1d502845ce430d3d4fd145 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -85,8 +85,12 @@ static struct addr_marker address_markers[] = {
        [VMALLOC_START_NR]      = { 0UL,                "vmalloc() Area" },
        [VMEMMAP_START_NR]      = { 0UL,                "Vmemmap" },
 #ifdef CONFIG_KASAN
-       [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" },
-       [KASAN_SHADOW_END_NR]   = { KASAN_SHADOW_END,   "KASAN shadow end" },
+       /*
+        * These fields get initialized with the (dynamic)
+        * KASAN_SHADOW_{START,END} values in pt_dump_init().
+        */
+       [KASAN_SHADOW_START_NR] = { 0UL,                "KASAN shadow" },
+       [KASAN_SHADOW_END_NR]   = { 0UL,                "KASAN shadow end" },
 #endif
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
        [LDT_NR]                = { 0UL,                "LDT remap" },
@@ -571,6 +575,10 @@ static int __init pt_dump_init(void)
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
        address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
 #endif
+#ifdef CONFIG_KASAN
+       address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
+       address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
+#endif
 #endif
 #ifdef CONFIG_X86_32
        address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
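
The dump_pagetables.c change follows from the same macro:
KASAN_SHADOW_END is derived from __VIRTUAL_MASK_SHIFT, so once the
shift reads pgtable_l5_enabled the markers can no longer be filled in
by a file-scope initializer, which C requires to be a constant
expression. A minimal sketch of the constraint (names and values are
hypothetical, not the real KASAN layout):

	#include <stdio.h>

	static int l5_enabled;			/* set during boot in the kernel */
	#define SHADOW_END (l5_enabled ? 0x2000UL : 0x1000UL) /* hypothetical */

	/*
	 * static unsigned long marker = SHADOW_END;	-- would not compile:
	 * a file-scope initializer must be a constant expression.
	 */
	static unsigned long marker;

	int main(void)
	{
		l5_enabled = 1;
		marker = SHADOW_END;	/* fine at run time, as pt_dump_init() does */
		printf("marker = %#lx\n", marker);
		return 0;
	}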
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 515b98a8cceef5f80e924aa755e41534d006080e..d079878c6cbca2382db70404b5b8ea47572761d7 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -52,7 +52,7 @@ static __initdata struct kaslr_memory_region {
        unsigned long *base;
        unsigned long size_tb;
 } kaslr_regions[] = {
-       { &page_offset_base, 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT) /* Maximum */ },
+       { &page_offset_base, 0 },
        { &vmalloc_base, VMALLOC_SIZE_TB },
        { &vmemmap_base, 1 },
 };
@@ -93,6 +93,8 @@ void __init kernel_randomize_memory(void)
        if (!kaslr_memory_enabled())
                return;
 
+       kaslr_regions[0].size_tb = 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT);
+
        /*
         * Update Physical memory mapping to available and
         * add padding if needed (especially for memory hotplug support).
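
The kaslr.c hunk applies the same pattern: the region size moves out
of the static initializer into kernel_randomize_memory(). With the
current constants the value is unchanged; presumably the point is to
let a later patch pick the size from the boot-selected paging mode.
For reference, the arithmetic with __PHYSICAL_MASK_SHIFT = 52 and
TB_SHIFT = 40 (illustration only):

	#include <stdio.h>

	int main(void)
	{
		/* 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT) */
		unsigned long size_tb = 1UL << (52 - 40);

		printf("kaslr_regions[0].size_tb = %lu TB\n", size_tb); /* 4096 */
		return 0;
	}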