arm64: choose memstart_addr based on minimum sparsemem section alignment
author    Ard Biesheuvel <ard.biesheuvel@linaro.org>
          Wed, 30 Mar 2016 12:25:47 +0000 (14:25 +0200)
committer Will Deacon <will.deacon@arm.com>
          Thu, 14 Apr 2016 15:15:08 +0000 (16:15 +0100)
This redefines ARM64_MEMSTART_ALIGN in terms of the minimal alignment
required by sparsemem vmemmap. This comes down to using 1 GB for all
translation granules if CONFIG_SPARSEMEM_VMEMMAP is enabled.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
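
For illustration, here is a minimal standalone sketch (not part of the patch) that mirrors the preprocessor logic the diff below introduces, using the shift values the arm64 headers defined at the time: PUD_SHIFT = 30 (4K granule), PMD_SHIFT = 25 (16K granule, so PMD_SHIFT + 5 = 30, i.e. 32 * PMD_SIZE), PMD_SHIFT = 29 (64K granule), and SECTION_SIZE_BITS = 30 from asm/sparsemem.h:

    #include <stdio.h>

    #define SECTION_SIZE_BITS   30  /* arm64 asm/sparsemem.h at the time */

    /* Same decision as the new ARM64_MEMSTART_ALIGN #if in the diff */
    static unsigned long memstart_align(int memstart_shift, int vmemmap)
    {
            if (vmemmap && memstart_shift < SECTION_SIZE_BITS)
                    return 1UL << SECTION_SIZE_BITS;
            return 1UL << memstart_shift;
    }

    int main(void)
    {
            struct { const char *granule; int shift; } cfg[] = {
                    { "4K  (PUD_SHIFT)",     30 },
                    { "16K (PMD_SHIFT + 5)", 25 + 5 },
                    { "64K (PMD_SHIFT)",     29 },
            };
            int i;

            for (i = 0; i < 3; i++)
                    printf("%-20s !vmemmap: %4luM   vmemmap: %4luM\n",
                           cfg[i].granule,
                           memstart_align(cfg[i].shift, 0) >> 20,
                           memstart_align(cfg[i].shift, 1) >> 20);
            return 0;
    }

With CONFIG_SPARSEMEM_VMEMMAP the alignment comes out as 1024M (1 GB) for all three granules, as the message above states; without it, the 64K granule keeps its previous 512M alignment.
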
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 5c6375d8528bb8ddd313bfa2911f7a0d77819028..7e51d1b57c0c56461a9be0fb825fa84a9faeea8b 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -19,6 +19,7 @@
 #ifndef __ASM_KERNEL_PGTABLE_H
 #define __ASM_KERNEL_PGTABLE_H
 
+#include <asm/sparsemem.h>
 
 /*
  * The linear mapping and the start of memory are both 2M aligned (per
@@ -86,10 +87,24 @@
  * (64k granule), or a multiple that can be mapped using contiguous bits
  * in the page tables: 32 * PMD_SIZE (16k granule)
  */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define ARM64_MEMSTART_ALIGN   SZ_512M
+#if defined(CONFIG_ARM64_4K_PAGES)
+#define ARM64_MEMSTART_SHIFT           PUD_SHIFT
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define ARM64_MEMSTART_SHIFT           (PMD_SHIFT + 5)
 #else
-#define ARM64_MEMSTART_ALIGN   SZ_1G
+#define ARM64_MEMSTART_SHIFT           PMD_SHIFT
+#endif
+
+/*
+ * sparsemem vmemmap imposes an additional requirement on the alignment of
+ * memstart_addr, due to the fact that the base of the vmemmap region
+ * has a direct correspondence, and needs to appear sufficiently aligned
+ * in the virtual address space.
+ */
+#if defined(CONFIG_SPARSEMEM_VMEMMAP) && ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
+#define ARM64_MEMSTART_ALIGN   (1UL << SECTION_SIZE_BITS)
+#else
+#define ARM64_MEMSTART_ALIGN   (1UL << ARM64_MEMSTART_SHIFT)
 #endif
 
 #endif /* __ASM_KERNEL_PGTABLE_H */
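
To see what the "direct correspondence" in the new comment means for alignment, here is a back-of-the-envelope sketch (illustrative only; it assumes a 4K granule, SECTION_SIZE_BITS = 30 and a 64-byte struct page, none of which the patch itself spells out). The memmap slice for a section starts at VMEMMAP_START + ((sec_base - memstart_addr) >> PAGE_SHIFT) * sizeof(struct page), so each 1 GB section's 16M slice only lands on a natural 16M boundary inside the vmemmap region when memstart_addr itself is section (1 GB) aligned:

    #include <stdio.h>

    #define PAGE_SHIFT          12      /* 4K granule assumed */
    #define SECTION_SIZE_BITS   30
    #define SZ_STRUCT_PAGE      64UL    /* assumed sizeof(struct page) */

    /* Offset of a section's memmap slice from the start of vmemmap */
    static unsigned long memmap_offset(unsigned long sec_base,
                                       unsigned long memstart)
    {
            return ((sec_base - memstart) >> PAGE_SHIFT) * SZ_STRUCT_PAGE;
    }

    int main(void)
    {
            unsigned long sec = 3UL << SECTION_SIZE_BITS;  /* a 1 GB section base */

            /* memstart_addr 1 GB aligned: offset 0x2000000 (32M), a 16M multiple */
            printf("aligned:    %#lx\n", memmap_offset(sec, 1UL << 30));
            /* memstart_addr only 512M aligned: offset 0x2800000 (40M), not one */
            printf("misaligned: %#lx\n", memmap_offset(sec, 1UL << 29));
            return 0;
    }

Rounding memstart_addr down to ARM64_MEMSTART_ALIGN = 1UL << SECTION_SIZE_BITS rules the second case out, which is why the new #if raises the alignment whenever ARM64_MEMSTART_SHIFT alone would be weaker than a section.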