Skip to content

Commit 1fc07c3

Browse files
committed
memcg: Use trylock to access memcg stock_lock.
JIRA: https://issues.redhat.com/browse/RHEL-78202

commit 01d3722
Author: Alexei Starovoitov <ast@kernel.org>
Date:   Fri Feb 21 18:44:25 2025 -0800

    memcg: Use trylock to access memcg stock_lock.

    Teach memcg to operate under trylock conditions when spinning locks
    cannot be used.

    localtry_trylock might fail and this would lead to charge cache bypass
    if the calling context doesn't allow spinning (gfpflags_allow_spinning).
    In those cases charge the memcg counter directly and fail early if that
    is not possible. This might cause a pre-mature charge failing but it
    will allow an opportunistic charging that is safe from the
    try_alloc_pages path.

    Acked-by: Michal Hocko <mhocko@suse.com>
    Acked-by: Vlastimil Babka <vbabka@suse.cz>
    Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
    Signed-off-by: Alexei Starovoitov <ast@kernel.org>
    Link: https://lore.kernel.org/r/20250222024427.30294-5-alexei.starovoitov@gmail.com
    Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Signed-off-by: Gregory Bell <grbell@redhat.com>
1 parent 92af98c commit 1fc07c3

File tree

1 file changed

+37
-16
lines changed

1 file changed

+37
-16
lines changed

mm/memcontrol.c

Lines changed: 37 additions & 16 deletions
Original file line number | Diff line number | Diff line change
@@ -1771,7 +1771,7 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
17711771
}
17721772

17731773
struct memcg_stock_pcp {
1774-
local_lock_t stock_lock;
1774+
localtry_lock_t stock_lock;
17751775
struct mem_cgroup *cached; /* this never be root cgroup */
17761776
unsigned int nr_pages;
17771777

@@ -1786,7 +1786,7 @@ struct memcg_stock_pcp {
17861786
#define FLUSHING_CACHED_CHARGE 0
17871787
};
17881788
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
1789-
.stock_lock = INIT_LOCAL_LOCK(stock_lock),
1789+
.stock_lock = INIT_LOCALTRY_LOCK(stock_lock),
17901790
};
17911791
static DEFINE_MUTEX(percpu_charge_mutex);
17921792

@@ -1798,14 +1798,16 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
17981798
* consume_stock: Try to consume stocked charge on this cpu.
17991799
* @memcg: memcg to consume from.
18001800
* @nr_pages: how many pages to charge.
1801+
* @gfp_mask: allocation mask.
18011802
*
18021803
* The charges will only happen if @memcg matches the current cpu's memcg
18031804
* stock, and at least @nr_pages are available in that stock. Failure to
18041805
* service an allocation will refill the stock.
18051806
*
18061807
* returns true if successful, false otherwise.
18071808
*/
1808-
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1809+
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages,
1810+
gfp_t gfp_mask)
18091811
{
18101812
struct memcg_stock_pcp *stock;
18111813
unsigned int stock_pages;
@@ -1815,7 +1817,11 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
18151817
if (nr_pages > MEMCG_CHARGE_BATCH)
18161818
return ret;
18171819

1818-
local_lock_irqsave(&memcg_stock.stock_lock, flags);
1820+
if (!localtry_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
1821+
if (!gfpflags_allow_spinning(gfp_mask))
1822+
return ret;
1823+
localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
1824+
}
18191825

18201826
stock = this_cpu_ptr(&memcg_stock);
18211827
stock_pages = READ_ONCE(stock->nr_pages);
@@ -1824,7 +1830,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
18241830
ret = true;
18251831
}
18261832

1827-
local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1833+
localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
18281834

18291835
return ret;
18301836
}
@@ -1863,14 +1869,14 @@ static void drain_local_stock(struct work_struct *dummy)
18631869
* drain_stock races is that we always operate on local CPU stock
18641870
* here with IRQ disabled
18651871
*/
1866-
local_lock_irqsave(&memcg_stock.stock_lock, flags);
1872+
localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
18671873

18681874
stock = this_cpu_ptr(&memcg_stock);
18691875
old = drain_obj_stock(stock);
18701876
drain_stock(stock);
18711877
clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
18721878

1873-
local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1879+
localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
18741880
obj_cgroup_put(old);
18751881
}
18761882

@@ -1900,9 +1906,20 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
19001906
{
19011907
unsigned long flags;
19021908

1903-
local_lock_irqsave(&memcg_stock.stock_lock, flags);
1909+
if (!localtry_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
1910+
/*
1911+
* In case of unlikely failure to lock percpu stock_lock
1912+
* uncharge memcg directly.
1913+
*/
1914+
if (mem_cgroup_is_root(memcg))
1915+
return;
1916+
page_counter_uncharge(&memcg->memory, nr_pages);
1917+
if (do_memsw_account())
1918+
page_counter_uncharge(&memcg->memsw, nr_pages);
1919+
return;
1920+
}
19041921
__refill_stock(memcg, nr_pages);
1905-
local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1922+
localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
19061923
}
19071924

19081925
/*
@@ -2254,9 +2271,13 @@ int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
22542271
unsigned long pflags;
22552272

22562273
retry:
2257-
if (consume_stock(memcg, nr_pages))
2274+
if (consume_stock(memcg, nr_pages, gfp_mask))
22582275
return 0;
22592276

2277+
if (!gfpflags_allow_spinning(gfp_mask))
2278+
/* Avoid the refill and flush of the older stock */
2279+
batch = nr_pages;
2280+
22602281
if (!do_memsw_account() ||
22612282
page_counter_try_charge(&memcg->memsw, batch, &counter)) {
22622283
if (page_counter_try_charge(&memcg->memory, batch, &counter))
@@ -2740,7 +2761,7 @@ static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
27402761
unsigned long flags;
27412762
int *bytes;
27422763

2743-
local_lock_irqsave(&memcg_stock.stock_lock, flags);
2764+
localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
27442765
stock = this_cpu_ptr(&memcg_stock);
27452766

27462767
/*
@@ -2793,7 +2814,7 @@ static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
27932814
if (nr)
27942815
__mod_objcg_mlstate(objcg, pgdat, idx, nr);
27952816

2796-
local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2817+
localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
27972818
obj_cgroup_put(old);
27982819
}
27992820

@@ -2803,15 +2824,15 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
28032824
unsigned long flags;
28042825
bool ret = false;
28052826

2806-
local_lock_irqsave(&memcg_stock.stock_lock, flags);
2827+
localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
28072828

28082829
stock = this_cpu_ptr(&memcg_stock);
28092830
if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
28102831
stock->nr_bytes -= nr_bytes;
28112832
ret = true;
28122833
}
28132834

2814-
local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2835+
localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
28152836

28162837
return ret;
28172838
}
@@ -2903,7 +2924,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
29032924
unsigned long flags;
29042925
unsigned int nr_pages = 0;
29052926

2906-
local_lock_irqsave(&memcg_stock.stock_lock, flags);
2927+
localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
29072928

29082929
stock = this_cpu_ptr(&memcg_stock);
29092930
if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
@@ -2921,7 +2942,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
29212942
stock->nr_bytes &= (PAGE_SIZE - 1);
29222943
}
29232944

2924-
local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2945+
localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
29252946
obj_cgroup_put(old);
29262947

29272948
if (nr_pages)

0 commit comments

Comments (0)