On 20.01.23 at 14:39, Alexander Atanasov wrote:
From: Andrey Ryabinin <ryabinin....@gmail.com>
Forward port feature: mm: per memory cgroup page cache limit.
The original implementation consisted of these commits:
commit 758d52e33a67 ("configs: Enable CONFIG_PAGE_EXTENSION")
commit 741beaa93c89 ("mm: introduce page vz extension (using page_ext)")
commit d42d3c8b849d ("mm/memcg: limit page cache in memcg hack")
This port drops the page vz extensions in favor of using a memcg_data
bit to mark a page as cache. The benefit is a simpler implementation
and an easier port. If new flags are needed later, the newly
introduced folio can be used.
The feature exposes two files, one to set the limit and one to check usage:
/sys/fs/cgroup/memory/pagecache_limiter/memory.cache.limit_in_bytes
/sys/fs/cgroup/memory/pagecache_limiter/memory.cache.usage_in_bytes
It is enabled via /sys/fs/cgroup/memory/pagecache_limiter/tasks.
https://jira.sw.ru/browse/PSBM-144609
Signed-off-by: Alexander Atanasov <alexander.atana...@virtuozzo.com>
Signed-off-by: Andrey Ryabinin <ryabinin....@gmail.com>
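The memcg_data bit approach described above would, as I understand it,
look roughly like the sketch below; MEMCG_DATA_PGCACHE, the bit value
and the helper names are made up for illustration, the actual flag and
helpers in the patch may well differ:

	/* Minimal sketch, not from the patch: a hypothetical MEMCG_DATA_* bit. */
	#define MEMCG_DATA_PGCACHE	(1UL << 2)	/* illustrative bit position */

	/* Mark a page as charged against the page cache limit (illustrative). */
	static inline void page_memcg_set_cache(struct page *page)
	{
		page->memcg_data |= MEMCG_DATA_PGCACHE;
	}

	/* Check whether a page was charged as page cache (illustrative). */
	static inline bool page_memcg_is_cache(struct page *page)
	{
		return page->memcg_data & MEMCG_DATA_PGCACHE;
	}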
Just two nits from a quick look which hopefully improve readability;
nothing major, as I'm not familiar with the code. But see below.
<snip>
@@ -2260,9 +2263,16 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
local_lock_irqsave(&memcg_stock.stock_lock, flags);
stock = this_cpu_ptr(&memcg_stock);
- if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
- stock->nr_pages -= nr_pages;
- ret = true;
+ if (memcg == stock->cached) {
+ if (cache && stock->cache_nr_pages >= nr_pages) {
+ stock->cache_nr_pages -= nr_pages;
+ ret = true;
+ }
+
+ if (!cache && stock->nr_pages >= nr_pages) {
+ stock->nr_pages -= nr_pages;
+ ret = true;
+ }
nit: I find the original condition somewhat more readable, i.e. having
the memcg == stock->cached check and the nr_pages check in the same if:

	if (memcg == stock->cached &&
	    (cache ? stock->cache_nr_pages : stock->nr_pages) >= nr_pages) {
		if (cache)
			stock->cache_nr_pages -= nr_pages;
		else
			stock->nr_pages -= nr_pages;
		ret = true;
	}
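(The ternary picks the counter to check so that it always matches the
one being decremented; checking cache_nr_pages for a non-cache charge
would consume from the wrong pool.)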
}
local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
@@ -2276,15 +2286,20 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
static void drain_stock(struct memcg_stock_pcp *stock)
{
struct mem_cgroup *old = stock->cached;
+ unsigned long nr_pages = stock->nr_pages + stock->cache_nr_pages;
if (!old)
return;
- if (stock->nr_pages) {
- page_counter_uncharge(&old->memory, stock->nr_pages);
+ if (stock->cache_nr_pages)
+ page_counter_uncharge(&old->cache, stock->cache_nr_pages);
+
+ if (nr_pages) {
+ page_counter_uncharge(&old->memory, nr_pages);
if (do_memsw_account())
- page_counter_uncharge(&old->memsw, stock->nr_pages);
+ page_counter_uncharge(&old->memsw, nr_pages);
stock->nr_pages = 0;
+ stock->cache_nr_pages = 0;
}
css_put(&old->css);
@@ -2318,9 +2333,11 @@ static void drain_local_stock(struct work_struct *dummy)
* Cache charges(val) to local per_cpu area.
* This will be consumed by consume_stock() function, later.
*/
-static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages,
+ bool cache)
{
struct memcg_stock_pcp *stock;
+ unsigned long stock_nr_pages;
stock = this_cpu_ptr(&memcg_stock);
if (stock->cached != memcg) { /* reset if necessary */
@@ -2328,18 +2345,23 @@ static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
css_get(&memcg->css);
stock->cached = memcg;
}
- stock->nr_pages += nr_pages;
+ if (!cache)
+ stock->nr_pages += nr_pages;
+ else
+ stock->cache_nr_pages += nr_pages;
nit: It reads more naturally if we do if (cache) ... else ... rather
than starting with the negative condition.
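Something along these lines (just showing the suggested ordering, with
the same names as in the patch):

	if (cache)
		stock->cache_nr_pages += nr_pages;
	else
		stock->nr_pages += nr_pages;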
- if (stock->nr_pages > MEMCG_CHARGE_BATCH)
+ stock_nr_pages = stock->nr_pages + stock->cache_nr_pages;
+ if (stock_nr_pages > MEMCG_CHARGE_BATCH)
drain_stock(stock);
}
<snip>
_______________________________________________
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel