test_no_invasive_cgroup_shrink sets up two cgroups: wb_group, which is
expected to trigger zswap writeback, and a control group (renamed to
zw_group by this patch), which should only have pages sitting in zswap
without any writeback.

There are two problems with the current test:

1) The data patterns are reversed. wb_group uses allocate_bytes(),
   which writes only a single byte per page. Such pages are trivially
   compressible, especially by zstd, so the compressed pages fit within
   zswap.max and writeback is never triggered. Meanwhile, the control
   group uses getrandom() to produce hard-to-compress data, even though
   it is the group that does *not* need writeback (a standalone sketch
   of the compressibility gap follows this list).

2) The test uses fixed sizes (10K zswap.max, 10MB allocation) that are
   too small on systems with large PAGE_SIZE (e.g. 64K), failing to
   build enough memory pressure to trigger writeback reliably.
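
To make the compressibility gap concrete, here is a minimal standalone
sketch (not part of the patch) that compresses one page filled each
way. It assumes libzstd is installed (build with -lzstd); the names
are illustrative only. zswap compresses pages individually, so the
per-page comparison is the relevant one: the single-byte page shrinks
to almost nothing, while the quarter-random page cannot compress below
the size of its random prefix.

  #include <stdio.h>
  #include <stdlib.h>
  #include <unistd.h>
  #include <sys/random.h>
  #include <zstd.h>

  int main(void)
  {
          size_t page = sysconf(_SC_PAGESIZE);
          size_t cap = ZSTD_compressBound(page);
          char *buf = calloc(1, page);
          void *dst = malloc(cap);

          if (!buf || !dst)
                  return 1;

          /* allocate_bytes() pattern: a single byte written per page */
          buf[0] = 'a';
          printf("one byte/page:  %zu -> %zu bytes\n", page,
                 ZSTD_compress(dst, cap, buf, page, 3));

          /* patched wb_group pattern: first quarter of the page random */
          if (getrandom(buf, page / 4, 0) < 0)
                  return 1;
          printf("quarter random: %zu -> %zu bytes\n", page,
                 ZSTD_compress(dst, cap, buf, page, 3));
          return 0;
  }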

Fix both issues by:
  - Swapping the data patterns: fill wb_group pages with partially
    random data (getrandom for page_size/4 bytes) to resist compression
    and trigger writeback, and fill zw_group pages with simple repeated
    data to stay compressed in zswap.
  - Making all size parameters PAGE_SIZE-aware: set the allocation
    size to PAGE_SIZE * 1024, wb_group's memory.zswap.max to PAGE_SIZE,
    and memory.max to allocation_size / 2 for both cgroups (see the
    sizing sketch after this list).
  - Allocating memory inline instead of via cg_run() so the pages
    remain resident throughout the test.
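
For reference, the resulting sizing can be sanity-checked with a short
sketch (illustrative only; page_size is assumed to come from
sysconf(_SC_PAGESIZE), as the selftest harness does). On 4K pages the
allocation is 4MB, on 64K pages 64MB, so memory.max always forces half
the working set out toward zswap, and a one-page memory.zswap.max on
wb_group guarantees its zswap limit is hit almost immediately:

  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          size_t page_size = sysconf(_SC_PAGESIZE);
          size_t allocation_size = page_size * 1024;

          /* wb_group only: one page of zswap before the limit bites */
          size_t zswap_max = page_size;
          /* both cgroups: half the allocation must be reclaimed */
          size_t mem_max = allocation_size / 2;

          printf("alloc=%zu zswap.max=%zu memory.max=%zu\n",
                 allocation_size, zswap_max, mem_max);
          return 0;
  }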

=== Error Log ===
 # getconf PAGESIZE
 65536

 # ./test_zswap
 TAP version 13
 ...
 ok 5 test_zswap_writeback_disabled
 ok 6 # SKIP test_no_kmem_bypass
 not ok 7 test_no_invasive_cgroup_shrink

Signed-off-by: Li Wang <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Michal Koutný <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: Nhat Pham <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Roman Gushchin <[email protected]>
Cc: Shakeel Butt <[email protected]>
Cc: Yosry Ahmed <[email protected]>
---

Notes:
    v5:
        - Swap data patterns: use getrandom() for wb_group and simple
          memset for zw_group to fix the reversed allocation logic.
        - Rename control_group to zw_group for clarity.
        - Allocate memory inline instead of via cg_run() so pages remain
          resident throughout the test.

 tools/testing/selftests/cgroup/test_zswap.c | 71 +++++++++++++++-------
 1 file changed, 50 insertions(+), 21 deletions(-)

diff --git a/tools/testing/selftests/cgroup/test_zswap.c b/tools/testing/selftests/cgroup/test_zswap.c
index fc7a9aa27e27..72082e5d4725 100644
--- a/tools/testing/selftests/cgroup/test_zswap.c
+++ b/tools/testing/selftests/cgroup/test_zswap.c
@@ -9,6 +9,7 @@
 #include <string.h>
 #include <sys/wait.h>
 #include <sys/mman.h>
+#include <sys/random.h>
 
 #include "kselftest.h"
 #include "cgroup_util.h"
@@ -424,44 +425,72 @@ static int test_zswap_writeback_disabled(const char *root)
 static int test_no_invasive_cgroup_shrink(const char *root)
 {
        int ret = KSFT_FAIL;
-       size_t control_allocation_size = MB(10);
-       char *control_allocation = NULL, *wb_group = NULL, *control_group = NULL;
+       size_t off;
+       size_t allocation_size = page_size * 1024;
+       unsigned int nr_pages = allocation_size / page_size;
+       char zswap_max_buf[32], mem_max_buf[32];
+       char *zw_allocation = NULL, *wb_allocation = NULL;
+       char *zw_group = NULL, *wb_group = NULL;
+
+       snprintf(zswap_max_buf, sizeof(zswap_max_buf), "%zu", page_size);
+       snprintf(mem_max_buf, sizeof(mem_max_buf), "%zu", allocation_size / 2);
 
        wb_group = setup_test_group_1M(root, "per_memcg_wb_test1");
        if (!wb_group)
                return KSFT_FAIL;
-       if (cg_write(wb_group, "memory.zswap.max", "10K"))
+       if (cg_write(wb_group, "memory.zswap.max", zswap_max_buf))
+               goto out;
+       if (cg_write(wb_group, "memory.max", mem_max_buf))
+               goto out;
+
+       zw_group = setup_test_group_1M(root, "per_memcg_wb_test2");
+       if (!zw_group)
                goto out;
-       control_group = setup_test_group_1M(root, "per_memcg_wb_test2");
-       if (!control_group)
+       if (cg_write(zw_group, "memory.max", mem_max_buf))
                goto out;
 
-       /* Push some test_group2 memory into zswap */
-       if (cg_enter_current(control_group))
+       /* Push some zw_group memory into zswap (simple data, easy to compress) */
+       if (cg_enter_current(zw_group))
                goto out;
-       control_allocation = malloc(control_allocation_size);
-       for (int i = 0; i < control_allocation_size; i += page_size)
-               control_allocation[i] = 'a';
-       if (cg_read_key_long(control_group, "memory.stat", "zswapped") < 1)
+       zw_allocation = malloc(allocation_size);
+       if (!zw_allocation)
+               goto out;
+       for (int i = 0; i < nr_pages; i++) {
+               off = (unsigned long)i * page_size;
+               memset(&zw_allocation[off], 'a', page_size/4);
+       }
+       if (cg_read_key_long(zw_group, "memory.stat", "zswapped") < 1)
                goto out;
 
-       /* Allocate 10x memory.max to push wb_group memory into zswap and trigger wb */
-       if (cg_run(wb_group, allocate_bytes, (void *)MB(10)))
+       /* Push wb_group memory into zswap with hard-to-compress data to trigger wb */
+       if (cg_enter_current(wb_group))
+               goto out;
+       wb_allocation = malloc(allocation_size);
+       if (!wb_allocation)
                goto out;
+       for (int i = 0; i < nr_pages; i++) {
+               off = (unsigned long)i * page_size;
+               memset(&wb_allocation[off], 0, page_size);
+               getrandom(&wb_allocation[off], page_size/4, 0);
+       }
 
        /* Verify that only zswapped memory from gwb_group has been written back */
-       if (get_cg_wb_count(wb_group) > 0 && get_cg_wb_count(control_group) == 0)
+       if (get_cg_wb_count(wb_group) > 0 && get_cg_wb_count(zw_group) == 0)
                ret = KSFT_PASS;
 out:
        cg_enter_current(root);
-       if (control_group) {
-               cg_destroy(control_group);
-               free(control_group);
+       if (zw_group) {
+               cg_destroy(zw_group);
+               free(zw_group);
+       }
+       if (wb_group) {
+               cg_destroy(wb_group);
+               free(wb_group);
        }
-       cg_destroy(wb_group);
-       free(wb_group);
-       if (control_allocation)
-               free(control_allocation);
+       if (zw_allocation)
+               free(zw_allocation);
+       if (wb_allocation)
+               free(wb_allocation);
        return ret;
 }
 
-- 
2.53.0

