s390/pgtable: add mapping statistics
author Heiko Carstens <heiko.carstens@de.ibm.com>
Fri, 20 May 2016 06:08:14 +0000 (08:08 +0200)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
Mon, 13 Jun 2016 13:58:16 +0000 (15:58 +0200)
Add statistics that show how memory is mapped within the kernel
identity mapping. This is more or less the same as git commit
ce0c0e50f94e ("x86, generic: CPA add statistics about state of direct
mapping v4") for x86.

I also intentionally copied the lower case "k" in DirectMap4k versus
the upper case "M" and "G" in the other two lines. Let's have
consistent inconsistencies across architectures.

The output of /proc/meminfo now contains these additional lines:

DirectMap4k:        2048 kB
DirectMap1M:     3991552 kB
DirectMap2G:     4194304 kB
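
Each line is simply the number of mappings of that size, scaled to kB;
the sample output above corresponds to 512 4k pages, 3898 1M segments
and two 2G regions.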

Unlike the x86 version, the implementation on s390 is lockless, since I
assume changes to the kernel mapping are a very rare event. Therefore
it really doesn't matter if these statistics could potentially be
inconsistent if read while kernel page tables are being changed.
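
The whole mechanism boils down to an array of atomic counters that
writers update and the meminfo hook reads without taking any lock; a
condensed sketch of what the patch adds (names as in the hunks below,
PG_DIRECT_MAP_* come from the new enum in pgtable.h):

	/* One counter per mapping granularity: 4k page, 1M segment, 2G region. */
	atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

	/* Writers simply add or subtract; no lock is taken. */
	static inline void update_page_count(int level, long count)
	{
		if (IS_ENABLED(CONFIG_PROC_FS))
			atomic_long_add(count, &direct_pages_count[level]);
	}

	/* Readers may see a transient imbalance while a mapping is being split. */
	void arch_report_meminfo(struct seq_file *m)
	{
		seq_printf(m, "DirectMap4k:    %8lu kB\n",
			   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
		/* ... likewise for the 1M and 2G counters, shifted by 10 and 21 ... */
	}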

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/include/asm/pgtable.h
arch/s390/mm/pageattr.c
arch/s390/mm/vmem.c
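
In short: pgtable.h gains the PG_DIRECT_MAP_* enum and the
update_page_count() helper, pageattr.c holds the counters, implements
arch_report_meminfo() and accounts for large mappings being split, and
vmem.c accounts the pages added to or removed from the identity mapping.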

diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 9133388edd1f88b6f7db88624e00c9e890579c85..3038edb12cad426e51efb23576719220aed9ea7d 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -28,6 +28,7 @@
 #include <linux/mm_types.h>
 #include <linux/page-flags.h>
 #include <linux/radix-tree.h>
+#include <linux/atomic.h>
 #include <asm/bug.h>
 #include <asm/page.h>
 
@@ -37,6 +38,24 @@ extern void vmem_map_init(void);
 pmd_t *vmem_pmd_alloc(void);
 pte_t *vmem_pte_alloc(void);
 
+enum {
+       PG_DIRECT_MAP_4K = 0,
+       PG_DIRECT_MAP_1M,
+       PG_DIRECT_MAP_2G,
+       PG_DIRECT_MAP_MAX
+};
+
+extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
+
+static inline void update_page_count(int level, long count)
+{
+       if (IS_ENABLED(CONFIG_PROC_FS))
+               atomic_long_add(count, &direct_pages_count[level]);
+}
+
+struct seq_file;
+void arch_report_meminfo(struct seq_file *m);
+
 /*
  * The S390 doesn't have any external MMU info: the kernel page
  * tables contain all the necessary information.
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 91e5e29c1f5c8608abc1bebd0cffa8553a16755b..ba124d9c96bae7e93a055e374f7323520dcc1609 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -40,6 +40,20 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
 }
 #endif
 
+#ifdef CONFIG_PROC_FS
+atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
+
+void arch_report_meminfo(struct seq_file *m)
+{
+       seq_printf(m, "DirectMap4k:    %8lu kB\n",
+                  atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
+       seq_printf(m, "DirectMap1M:    %8lu kB\n",
+                  atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_1M]) << 10);
+       seq_printf(m, "DirectMap2G:    %8lu kB\n",
+                  atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_2G]) << 21);
+}
+#endif /* CONFIG_PROC_FS */
+
 static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
                    unsigned long dtt)
 {
@@ -114,6 +128,8 @@ static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
        }
        pmd_val(new) = __pa(pt_dir) | _SEGMENT_ENTRY;
        pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
+       update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE);
+       update_page_count(PG_DIRECT_MAP_1M, -1);
        return 0;
 }
 
@@ -181,6 +197,8 @@ static int split_pud_page(pud_t *pudp, unsigned long addr)
        }
        pud_val(new) = __pa(pm_dir) | _REGION3_ENTRY;
        pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
+       update_page_count(PG_DIRECT_MAP_1M, PTRS_PER_PMD);
+       update_page_count(PG_DIRECT_MAP_2G, -1);
        return 0;
 }
 
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index b200f976c36bc64e42b65ea4a2d81a3a7821c54c..a1e7c0b207e6e0533cde952d7c881307af8c9422 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -80,6 +80,7 @@ pte_t __ref *vmem_pte_alloc(void)
  */
 static int vmem_add_mem(unsigned long start, unsigned long size)
 {
+       unsigned long pages4k, pages1m, pages2g;
        unsigned long end = start + size;
        unsigned long address = start;
        pgd_t *pg_dir;
@@ -88,6 +89,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
        pte_t *pt_dir;
        int ret = -ENOMEM;
 
+       pages4k = pages1m = pages2g = 0;
        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
@@ -102,6 +104,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
                     !debug_pagealloc_enabled()) {
                        pud_val(*pu_dir) = address | pgprot_val(REGION3_KERNEL);
                        address += PUD_SIZE;
+                       pages2g++;
                        continue;
                }
                if (pud_none(*pu_dir)) {
@@ -116,6 +119,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
                    !debug_pagealloc_enabled()) {
                        pmd_val(*pm_dir) = address | pgprot_val(SEGMENT_KERNEL);
                        address += PMD_SIZE;
+                       pages1m++;
                        continue;
                }
                if (pmd_none(*pm_dir)) {
@@ -128,9 +132,13 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
                pt_dir = pte_offset_kernel(pm_dir, address);
                pte_val(*pt_dir) = address |  pgprot_val(PAGE_KERNEL);
                address += PAGE_SIZE;
+               pages4k++;
        }
        ret = 0;
 out:
+       update_page_count(PG_DIRECT_MAP_4K, pages4k);
+       update_page_count(PG_DIRECT_MAP_1M, pages1m);
+       update_page_count(PG_DIRECT_MAP_2G, pages2g);
        return ret;
 }
 
@@ -140,6 +148,7 @@ out:
  */
 static void vmem_remove_range(unsigned long start, unsigned long size)
 {
+       unsigned long pages4k, pages1m, pages2g;
        unsigned long end = start + size;
        unsigned long address = start;
        pgd_t *pg_dir;
@@ -147,6 +156,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
        pmd_t *pm_dir;
        pte_t *pt_dir;
 
+       pages4k = pages1m = pages2g = 0;
        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
@@ -161,6 +171,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
                if (pud_large(*pu_dir)) {
                        pud_clear(pu_dir);
                        address += PUD_SIZE;
+                       pages2g++;
                        continue;
                }
                pm_dir = pmd_offset(pu_dir, address);
@@ -171,13 +182,18 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
                if (pmd_large(*pm_dir)) {
                        pmd_clear(pm_dir);
                        address += PMD_SIZE;
+                       pages1m++;
                        continue;
                }
                pt_dir = pte_offset_kernel(pm_dir, address);
                pte_clear(&init_mm, address, pt_dir);
                address += PAGE_SIZE;
+               pages4k++;
        }
        flush_tlb_kernel_range(start, end);
+       update_page_count(PG_DIRECT_MAP_4K, -pages4k);
+       update_page_count(PG_DIRECT_MAP_1M, -pages1m);
+       update_page_count(PG_DIRECT_MAP_2G, -pages2g);
 }
 
 /*