mm: move zone watermark accesses behind an accessor
author: Mel Gorman <mgorman@techsingularity.net>
Fri, 28 Dec 2018 08:35:44 +0000 (00:35 -0800)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Fri, 28 Dec 2018 20:11:48 +0000 (12:11 -0800)
This is a preparation patch only; no functional change.

Link: http://lkml.kernel.org/r/20181123114528.28802-3-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Zi Yan <zi.yan@cs.rutgers.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mmzone.h
mm/compaction.c
mm/page_alloc.c

index bc0990c1f1c3cfb892a0e6028d1e9c80df285885..dcf1b66a96ab918088cb8b370e3088365e161d69 100644 (file)
@@ -269,9 +269,10 @@ enum zone_watermarks {
        NR_WMARK
 };
 
-#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
-#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
-#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
+#define min_wmark_pages(z) (z->_watermark[WMARK_MIN])
+#define low_wmark_pages(z) (z->_watermark[WMARK_LOW])
+#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH])
+#define wmark_pages(z, i) (z->_watermark[i])
 
 struct per_cpu_pages {
        int count;              /* number of pages in the list */
@@ -362,7 +363,7 @@ struct zone {
        /* Read-mostly fields */
 
        /* zone watermarks, access with *_wmark_pages(zone) macros */
-       unsigned long watermark[NR_WMARK];
+       unsigned long _watermark[NR_WMARK];
 
        unsigned long nr_reserved_highatomic;
 
index 7c607479de4a8b9c871f5b5dafbbc10784a7c6d4..ef29490b0f462349ec90b8672448f80627ef3af6 100644 (file)
@@ -1431,7 +1431,7 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
        if (is_via_compact_memory(order))
                return COMPACT_CONTINUE;
 
-       watermark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
+       watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
        /*
         * If watermarks for high-order allocation are already met, there
         * should be no need for compaction at all.
index 251b8a0c9c5d91d08f207598c09bfa90db4196a3..2046e333ea8fa952adcbc69043b7041cc63c7fac 100644 (file)
@@ -3376,7 +3376,7 @@ retry:
                        }
                }
 
-               mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
+               mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
                if (!zone_watermark_fast(zone, order, mark,
                                       ac_classzone_idx(ac), alloc_flags)) {
                        int ret;
@@ -4793,7 +4793,7 @@ long si_mem_available(void)
                pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
 
        for_each_zone(zone)
-               wmark_low += zone->watermark[WMARK_LOW];
+               wmark_low += low_wmark_pages(zone);
 
        /*
         * Estimate the amount of memory available for userspace allocations,
@@ -7431,13 +7431,13 @@ static void __setup_per_zone_wmarks(void)
 
                        min_pages = zone_managed_pages(zone) / 1024;
                        min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
-                       zone->watermark[WMARK_MIN] = min_pages;
+                       zone->_watermark[WMARK_MIN] = min_pages;
                } else {
                        /*
                         * If it's a lowmem zone, reserve a number of pages
                         * proportionate to the zone's size.
                         */
-                       zone->watermark[WMARK_MIN] = tmp;
+                       zone->_watermark[WMARK_MIN] = tmp;
                }
 
                /*
@@ -7449,8 +7449,8 @@ static void __setup_per_zone_wmarks(void)
                            mult_frac(zone_managed_pages(zone),
                                      watermark_scale_factor, 10000));
 
-               zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
-               zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
+               zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
+               zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
 
                spin_unlock_irqrestore(&zone->lock, flags);
        }