Patch series "mm: convert totalram_pages, totalhigh_pages and managed
pages to atomic", v5.
This series converts totalram_pages, totalhigh_pages and
zone->managed_pages to atomic variables.
Updates to totalram_pages, zone->managed_pages and totalhigh_pages are
protected by managed_page_count_lock, but readers never take that lock.
Convert these variables to atomic so that lockless readers cannot see a
torn store.
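To illustrate the hazard (a minimal sketch, not code from this series;
the variable and function names are made up): a store to a plain
unsigned long may be split by the compiler into multiple smaller
stores, so a lockless reader can observe a half-updated value, whereas
atomic_long_t accesses are single, untorn loads and stores.

#include <linux/atomic.h>

/* Plain counter: writers serialize on a lock, readers do not,
 * so an unlocked reader may observe a torn store. */
static unsigned long plain_pages;

/* Atomic counter: both sides use single, untorn accesses. */
static atomic_long_t page_count = ATOMIC_LONG_INIT(0);

static void adjust_page_count(long delta)
{
	atomic_long_add(delta, &page_count);
}

static long read_page_count(void)
{
	return atomic_long_read(&page_count);
}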
The main motivation was that managed_page_count_lock handling was
complicating things. It was discussed at length here,
https://lore.kernel.org/patchwork/patch/995739/#1181785
It seemed better to remove the lock and convert the variables to
atomic. With the change, preventing potential store-to-read tearing
comes as a bonus.
This patch (of 4):
This is in preparation for a later patch which converts totalram_pages
and zone->managed_pages to atomic variables. Each call site therefore
reads totalram_pages once into a local variable and uses that snapshot
throughout: once the variable is atomic, re-reading it might return a
different value each time, which could lead to unexpected behavior.
There are no known bugs as a result of the current code, but it is
better to prevent them in principle.
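For reference, a sketch of the shape the later conversion takes: the
page count becomes an atomic_long_t hidden behind small inline helpers,
so readers perform a single atomic load (a sketch only; treat the
helper names as illustrative):

#include <linux/atomic.h>

extern atomic_long_t _totalram_pages;

static inline unsigned long totalram_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalram_pages);
}

static inline void totalram_pages_add(long count)
{
	atomic_long_add(count, &_totalram_pages);
}

Callers then snapshot the value once, as the hunks below do with a
local nr_pages, instead of reading the global repeatedly.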
Link: http://lkml.kernel.org/r/1542090790-21750-2-git-send-email-arunks@codeaurora.org
Signed-off-by: Arun KS <arunks@codeaurora.org>
Reviewed-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
/* this will put all low memory onto the freelists */
memblock_free_all();
max_low_pfn = totalram_pages;
- max_pfn = totalram_pages;
+ max_pfn = max_low_pfn;
mem_init_print_info(NULL);
kmalloc_ok = 1;
}
size_t len, loff_t *ppos)
{
ssize_t ret = -EINVAL;
+ unsigned long nr_pages = totalram_pages;
- if ((len >> PAGE_SHIFT) > totalram_pages) {
- pr_err("too much data (max %ld pages)\n", totalram_pages);
+ if ((len >> PAGE_SHIFT) > nr_pages) {
+ pr_err("too much data (max %ld pages)\n", nr_pages);
return ret;
}
static unsigned long compute_balloon_floor(void)
{
unsigned long min_pages;
+ unsigned long nr_pages = totalram_pages;
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
/* Simple continuous piecewise linear function:
* max MiB -> min MiB gradient
* 8192 744 (1/16)
* 32768 1512 (1/32)
*/
- if (totalram_pages < MB2PAGES(128))
- min_pages = MB2PAGES(8) + (totalram_pages >> 1);
- else if (totalram_pages < MB2PAGES(512))
- min_pages = MB2PAGES(40) + (totalram_pages >> 2);
- else if (totalram_pages < MB2PAGES(2048))
- min_pages = MB2PAGES(104) + (totalram_pages >> 3);
- else if (totalram_pages < MB2PAGES(8192))
- min_pages = MB2PAGES(232) + (totalram_pages >> 4);
+ if (nr_pages < MB2PAGES(128))
+ min_pages = MB2PAGES(8) + (nr_pages >> 1);
+ else if (nr_pages < MB2PAGES(512))
+ min_pages = MB2PAGES(40) + (nr_pages >> 2);
+ else if (nr_pages < MB2PAGES(2048))
+ min_pages = MB2PAGES(104) + (nr_pages >> 3);
+ else if (nr_pages < MB2PAGES(8192))
+ min_pages = MB2PAGES(232) + (nr_pages >> 4);
else
- min_pages = MB2PAGES(488) + (totalram_pages >> 5);
+ min_pages = MB2PAGES(488) + (nr_pages >> 5);
#undef MB2PAGES
return min_pages;
}
void __init files_maxfiles_init(void)
{
unsigned long n;
- unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2;
+ unsigned long nr_pages = totalram_pages;
+ unsigned long memreserve = (nr_pages - nr_free_pages()) * 3/2;
- memreserve = min(memreserve, totalram_pages - 1);
- n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
+ memreserve = min(memreserve, nr_pages - 1);
+ n = ((nr_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
files_stat.max_files = max_t(unsigned long, n, NR_FILE);
}
static void set_max_threads(unsigned int max_threads_suggested)
{
u64 threads;
+ unsigned long nr_pages = totalram_pages;
/*
* The number of threads shall be limited such that the thread
* structures may only consume a small part of the available memory.
*/
- if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64)
+ if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64)
threads = MAX_THREADS;
else
- threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
+ threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE,
(u64) THREAD_SIZE * 8UL);
if (threads > max_threads_suggested)
int i;
unsigned long nr_segments = image->nr_segments;
unsigned long total_pages = 0;
+ unsigned long nr_pages = totalram_pages;
/*
* Verify that no more than half of memory will be consumed. If the
* request from userspace is too large, a large amount of time will be
* wasted allocating pages, which can cause a soft lockup.
*/
for (i = 0; i < nr_segments; i++) {
- if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2)
+ if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
return -EINVAL;
total_pages += PAGE_COUNT(image->segment[i].memsz);
}
- if (total_pages > totalram_pages / 2)
+ if (total_pages > nr_pages / 2)
return -EINVAL;
/*
for (i = 0; i < MAX_NR_ZONES; i++) {
struct zone *zone = pgdat->node_zones + i;
long max = 0;
+ unsigned long managed_pages = zone->managed_pages;
/* Find valid and maximum lowmem_reserve in the zone */
for (j = i; j < MAX_NR_ZONES; j++) {
	if (zone->lowmem_reserve[j] > max)
		max = zone->lowmem_reserve[j];
}
/* we treat the high watermark as reserved pages. */
max += high_wmark_pages(zone);
- if (max > zone->managed_pages)
- max = zone->managed_pages;
+ if (max > managed_pages)
+ max = managed_pages;
pgdat->totalreserve_pages += max;
static unsigned long shmem_default_max_inodes(void)
{
- return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
+ unsigned long nr_pages = totalram_pages;
+ return min(nr_pages - totalhigh_pages, nr_pages / 2);
}
#endif
static int __init dccp_init(void)
{
unsigned long goal;
+ unsigned long nr_pages = totalram_pages;
int ehash_order, bhash_order, i;
int rc;
*
* The methodology is similar to that of the buffer cache.
*/
- if (totalram_pages >= (128 * 1024))
- goal = totalram_pages >> (21 - PAGE_SHIFT);
+ if (nr_pages >= (128 * 1024))
+ goal = nr_pages >> (21 - PAGE_SHIFT);
else
- goal = totalram_pages >> (23 - PAGE_SHIFT);
+ goal = nr_pages >> (23 - PAGE_SHIFT);
if (thash_entries)
goal = (thash_entries *
int nf_conntrack_init_start(void)
{
+ unsigned long nr_pages = totalram_pages;
int max_factor = 8;
int ret = -ENOMEM;
int i;
* >= 4GB machines have 65536 buckets.
*/
nf_conntrack_htable_size
- = (((totalram_pages << PAGE_SHIFT) / 16384)
+ = (((nr_pages << PAGE_SHIFT) / 16384)
/ sizeof(struct hlist_head));
- if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
+ if (nr_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
nf_conntrack_htable_size = 65536;
- else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
+ else if (nr_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
nf_conntrack_htable_size = 16384;
if (nf_conntrack_htable_size < 32)
nf_conntrack_htable_size = 32;
struct xt_hashlimit_htable *hinfo;
const struct seq_operations *ops;
unsigned int size, i;
+ unsigned long nr_pages = totalram_pages;
int ret;
if (cfg->size) {
size = cfg->size;
} else {
- size = (totalram_pages << PAGE_SHIFT) / 16384 /
+ size = (nr_pages << PAGE_SHIFT) / 16384 /
sizeof(struct hlist_head);
- if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
+ if (nr_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
size = 8192;
if (size < 16)
size = 16;
int status = -EINVAL;
unsigned long goal;
unsigned long limit;
+ unsigned long nr_pages = totalram_pages;
int max_share;
int order;
int num_entries;
* The methodology is similar to that of the tcp hash tables.
* Though not identical. Start by getting a goal size
*/
- if (totalram_pages >= (128 * 1024))
- goal = totalram_pages >> (22 - PAGE_SHIFT);
+ if (nr_pages >= (128 * 1024))
+ goal = nr_pages >> (22 - PAGE_SHIFT);
else
- goal = totalram_pages >> (24 - PAGE_SHIFT);
+ goal = nr_pages >> (24 - PAGE_SHIFT);
/* Then compute the page order for said goal */
order = get_order(goal);