From 7b14f3444cf6b54b9905f2e7e1507e68fd92a9c0 Mon Sep 17 00:00:00 2001
From: David Plowman <david.plowman@raspberrypi.com>
Date: Tue, 29 Mar 2022 16:10:06 +0100
Subject: [PATCH 0359/1085] mm,page_alloc,cma: introduce a customisable
 threshold for allocating pages in cma

On some platforms the cma area can be half the entire system memory,
meaning that allocations start happening in the cma area immediately.
This leads to fragmentation and subsequent fatal cma_alloc failures.

We introduce an "alloc_in_cma_threshold" parameter which requires that
more than this many sixteenths of a zone's free pages be in the cma
area before the allocator will use them. By default this is set to 12,
but the previous behaviour can be restored by setting it to 8 on the
kernel command line.
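
For illustration only (a userspace sketch, not part of the patch; the
helper name should_use_cma() is made up for this example), the check
that __rmqueue() performs after this change works out as follows:

  #include <stdbool.h>
  #include <stdio.h>

  #define ALLOC_IN_CMA_THRESHOLD_MAX 16

  /* Mirror of the patched check: use CMA once more than threshold/16
   * of the zone's free pages sit in the CMA area.
   */
  static bool should_use_cma(unsigned long free_cma_pages,
                             unsigned long free_pages,
                             unsigned long threshold)
  {
          return free_cma_pages >
                 free_pages / ALLOC_IN_CMA_THRESHOLD_MAX * threshold;
  }

  int main(void)
  {
          /* hypothetical zone: 1024 free pages, 600 of them in cma */
          printf("threshold 12: %d\n", should_use_cma(600, 1024, 12)); /* 0: 600 <= 768 */
          printf("threshold  8: %d\n", should_use_cma(600, 1024, 8));  /* 1: 600 > 512 */
          return 0;
  }

With the default of 12 the allocator only dips into cma once more than
twelve sixteenths (three quarters) of a zone's free pages are cma
pages; booting with alloc_in_cma_threshold=8 restores the previous
"more than half" behaviour (up to integer rounding).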

Signed-off-by: David Plowman <david.plowman@raspberrypi.com>
---
 mm/page_alloc.c | 28 +++++++++++++++++++++++++---
 1 file changed, 25 insertions(+), 3 deletions(-)

--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -204,6 +204,27 @@ EXPORT_SYMBOL(node_states);
 
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
+#define ALLOC_IN_CMA_THRESHOLD_MAX 16
+#define ALLOC_IN_CMA_THRESHOLD_DEFAULT 12
+
+static unsigned long _alloc_in_cma_threshold __read_mostly
+				= ALLOC_IN_CMA_THRESHOLD_DEFAULT;
+
+static int __init alloc_in_cma_threshold_setup(char *buf)
+{
+	unsigned long res;
+
+	if (kstrtoul(buf, 10, &res) < 0 ||
+	    res > ALLOC_IN_CMA_THRESHOLD_MAX) {
39 + pr_err("Bad alloc_cma_threshold value\n");
+		return 0;
+	}
+	_alloc_in_cma_threshold = res;
+	pr_info("Setting alloc_in_cma_threshold to %lu\n", res);
+	return 0;
+}
+early_param("alloc_in_cma_threshold", alloc_in_cma_threshold_setup);
+
 /*
  * A cached value of the page's pageblock's migratetype, used when the page is
  * put on a pcplist. Used to avoid the pageblock migratetype lookup when
@@ -2094,12 +2115,13 @@ __rmqueue(struct zone *zone, unsigned in
 	if (IS_ENABLED(CONFIG_CMA)) {
 		/*
 		 * Balance movable allocations between regular and CMA areas by
-		 * allocating from CMA when over half of the zone's free memory
-		 * is in the CMA area.
+		 * allocating from CMA when more than a given proportion of
+		 * the zone's free memory is in the CMA area.
 		 */
 		if (alloc_flags & ALLOC_CMA &&
 		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
-		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
+		    zone_page_state(zone, NR_FREE_PAGES) / ALLOC_IN_CMA_THRESHOLD_MAX
+					* _alloc_in_cma_threshold) {
 			page = __rmqueue_cma_fallback(zone, order);
 			if (page)
 				return page;