Add new tests for migrating anon THP pages, including anon_huge,
anon_huge_zero and error cases involving forced splitting of pages
during migration.

Cc: Andrew Morton <a...@linux-foundation.org>
Cc: David Hildenbrand <da...@redhat.com>
Cc: Zi Yan <z...@nvidia.com>
Cc: Joshua Hahn <joshua.hah...@gmail.com>
Cc: Rakie Kim <rakie....@sk.com>
Cc: Byungchul Park <byungc...@sk.com>
Cc: Gregory Price <gou...@gourry.net>
Cc: Ying Huang <ying.hu...@linux.alibaba.com>
Cc: Alistair Popple <apop...@nvidia.com>
Cc: Oscar Salvador <osalva...@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoa...@oracle.com>
Cc: Baolin Wang <baolin.w...@linux.alibaba.com>
Cc: "Liam R. Howlett" <liam.howl...@oracle.com>
Cc: Nico Pache <npa...@redhat.com>
Cc: Ryan Roberts <ryan.robe...@arm.com>
Cc: Dev Jain <dev.j...@arm.com>
Cc: Barry Song <bao...@kernel.org>
Cc: Lyude Paul <ly...@redhat.com>
Cc: Danilo Krummrich <d...@kernel.org>
Cc: David Airlie <airl...@gmail.com>
Cc: Simona Vetter <sim...@ffwll.ch>
Cc: Ralph Campbell <rcampb...@nvidia.com>
Cc: Mika Penttilä <mpent...@redhat.com>
Cc: Matthew Brost <matthew.br...@intel.com>
Cc: Francois Dugast <francois.dug...@intel.com>

Signed-off-by: Balbir Singh <balb...@nvidia.com>
---
 tools/testing/selftests/mm/hmm-tests.c | 410 +++++++++++++++++++++++++
 1 file changed, 410 insertions(+)

diff --git a/tools/testing/selftests/mm/hmm-tests.c b/tools/testing/selftests/mm/hmm-tests.c
index 141bf63cbe05..da3322a1282c 100644
--- a/tools/testing/selftests/mm/hmm-tests.c
+++ b/tools/testing/selftests/mm/hmm-tests.c
@@ -2056,4 +2056,414 @@ TEST_F(hmm, hmm_cow_in_device)
 
        hmm_buffer_free(buffer);
 }
+
+/*
+ * Migrate private anonymous huge empty page.
+ */
+TEST_F(hmm, migrate_anon_huge_empty)
+{
+       struct hmm_buffer *buffer;
+       unsigned long npages;
+       unsigned long size;
+       unsigned long i;
+       void *old_ptr;
+       void *map;
+       int *ptr;
+       int ret;
+
+       size = TWOMEG;
+
+       buffer = malloc(sizeof(*buffer));
+       ASSERT_NE(buffer, NULL);
+
+       buffer->fd = -1;
+       buffer->size = 2 * size;
+       buffer->mirror = malloc(size);
+       ASSERT_NE(buffer->mirror, NULL);
+       memset(buffer->mirror, 0xFF, size);
+
+       buffer->ptr = mmap(NULL, 2 * size,
+                          PROT_READ,
+                          MAP_PRIVATE | MAP_ANONYMOUS,
+                          buffer->fd, 0);
+       ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+       npages = size >> self->page_shift;
+       map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
+       ret = madvise(map, size, MADV_HUGEPAGE);
+       ASSERT_EQ(ret, 0);
+       old_ptr = buffer->ptr;
+       buffer->ptr = map;
+
+       /* Migrate memory to device. */
+       ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+       ASSERT_EQ(ret, 0);
+       ASSERT_EQ(buffer->cpages, npages);
+
+       /* Check what the device read. */
+       for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+               ASSERT_EQ(ptr[i], 0);
+
+       buffer->ptr = old_ptr;
+       hmm_buffer_free(buffer);
+}
+
+/*
+ * Migrate private anonymous huge zero page.
+ */
+TEST_F(hmm, migrate_anon_huge_zero)
+{
+       struct hmm_buffer *buffer;
+       unsigned long npages;
+       unsigned long size;
+       unsigned long i;
+       void *old_ptr;
+       void *map;
+       int *ptr;
+       int ret;
+       int val;
+
+       size = TWOMEG;
+
+       buffer = malloc(sizeof(*buffer));
+       ASSERT_NE(buffer, NULL);
+
+       buffer->fd = -1;
+       buffer->size = 2 * size;
+       buffer->mirror = malloc(size);
+       ASSERT_NE(buffer->mirror, NULL);
+       memset(buffer->mirror, 0xFF, size);
+
+       buffer->ptr = mmap(NULL, 2 * size,
+                          PROT_READ,
+                          MAP_PRIVATE | MAP_ANONYMOUS,
+                          buffer->fd, 0);
+       ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+       npages = size >> self->page_shift;
+       map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
+       ret = madvise(map, size, MADV_HUGEPAGE);
+       ASSERT_EQ(ret, 0);
+       old_ptr = buffer->ptr;
+       buffer->ptr = map;
+
+       /* Initialize a read-only zero huge page. */
+       val = *(int *)buffer->ptr;
+       ASSERT_EQ(val, 0);
+
+       /* Migrate memory to device. */
+       ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+       ASSERT_EQ(ret, 0);
+       ASSERT_EQ(buffer->cpages, npages);
+
+       /* Check what the device read. */
+       for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+               ASSERT_EQ(ptr[i], 0);
+
+       /* Fault pages back to system memory and check them. */
+       for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) {
+               ASSERT_EQ(ptr[i], 0);
+               /* If it asserts once, it probably will 500,000 times */
+               if (ptr[i] != 0)
+                       break;
+       }
+
+       buffer->ptr = old_ptr;
+       hmm_buffer_free(buffer);
+}
+
+/*
+ * Migrate private anonymous huge page and free.
+ */
+TEST_F(hmm, migrate_anon_huge_free)
+{
+       struct hmm_buffer *buffer;
+       unsigned long npages;
+       unsigned long size;
+       unsigned long i;
+       void *old_ptr;
+       void *map;
+       int *ptr;
+       int ret;
+
+       size = TWOMEG;
+
+       buffer = malloc(sizeof(*buffer));
+       ASSERT_NE(buffer, NULL);
+
+       buffer->fd = -1;
+       buffer->size = 2 * size;
+       buffer->mirror = malloc(size);
+       ASSERT_NE(buffer->mirror, NULL);
+       memset(buffer->mirror, 0xFF, size);
+
+       buffer->ptr = mmap(NULL, 2 * size,
+                          PROT_READ | PROT_WRITE,
+                          MAP_PRIVATE | MAP_ANONYMOUS,
+                          buffer->fd, 0);
+       ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+       npages = size >> self->page_shift;
+       map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
+       ret = madvise(map, size, MADV_HUGEPAGE);
+       ASSERT_EQ(ret, 0);
+       old_ptr = buffer->ptr;
+       buffer->ptr = map;
+
+       /* Initialize buffer in system memory. */
+       for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+               ptr[i] = i;
+
+       /* Migrate memory to device. */
+       ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+       ASSERT_EQ(ret, 0);
+       ASSERT_EQ(buffer->cpages, npages);
+
+       /* Check what the device read. */
+       for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+               ASSERT_EQ(ptr[i], i);
+
+       /* Try freeing it. */
+       ret = madvise(map, size, MADV_FREE);
+       ASSERT_EQ(ret, 0);
+
+       buffer->ptr = old_ptr;
+       hmm_buffer_free(buffer);
+}
+
+/*
+ * Migrate private anonymous huge page and fault back to sysmem.
+ */
+TEST_F(hmm, migrate_anon_huge_fault)
+{
+       struct hmm_buffer *buffer;
+       unsigned long npages;
+       unsigned long size;
+       unsigned long i;
+       void *old_ptr;
+       void *map;
+       int *ptr;
+       int ret;
+
+       size = TWOMEG;
+
+       buffer = malloc(sizeof(*buffer));
+       ASSERT_NE(buffer, NULL);
+
+       buffer->fd = -1;
+       buffer->size = 2 * size;
+       buffer->mirror = malloc(size);
+       ASSERT_NE(buffer->mirror, NULL);
+       memset(buffer->mirror, 0xFF, size);
+
+       buffer->ptr = mmap(NULL, 2 * size,
+                          PROT_READ | PROT_WRITE,
+                          MAP_PRIVATE | MAP_ANONYMOUS,
+                          buffer->fd, 0);
+       ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+       npages = size >> self->page_shift;
+       map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
+       ret = madvise(map, size, MADV_HUGEPAGE);
+       ASSERT_EQ(ret, 0);
+       old_ptr = buffer->ptr;
+       buffer->ptr = map;
+
+       /* Initialize buffer in system memory. */
+       for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+               ptr[i] = i;
+
+       /* Migrate memory to device. */
+       ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+       ASSERT_EQ(ret, 0);
+       ASSERT_EQ(buffer->cpages, npages);
+
+       /* Check what the device read. */
+       for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+               ASSERT_EQ(ptr[i], i);
+
+       /* Fault pages back to system memory and check them. */
+       for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+               ASSERT_EQ(ptr[i], i);
+
+       buffer->ptr = old_ptr;
+       hmm_buffer_free(buffer);
+}
+
+/*
+ * Migrate private anonymous huge page with allocation errors.
+ */
+TEST_F(hmm, migrate_anon_huge_err)
+{
+       struct hmm_buffer *buffer;
+       unsigned long npages;
+       unsigned long size;
+       unsigned long i;
+       void *old_ptr;
+       void *map;
+       int *ptr;
+       int ret;
+
+       size = TWOMEG;
+
+       buffer = malloc(sizeof(*buffer));
+       ASSERT_NE(buffer, NULL);
+
+       buffer->fd = -1;
+       buffer->size = 2 * size;
+       buffer->mirror = malloc(2 * size);
+       ASSERT_NE(buffer->mirror, NULL);
+       memset(buffer->mirror, 0xFF, 2 * size);
+
+       old_ptr = mmap(NULL, 2 * size, PROT_READ | PROT_WRITE,
+                       MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0);
+       ASSERT_NE(old_ptr, MAP_FAILED);
+
+       npages = size >> self->page_shift;
+       map = (void *)ALIGN((uintptr_t)old_ptr, size);
+       ret = madvise(map, size, MADV_HUGEPAGE);
+       ASSERT_EQ(ret, 0);
+       buffer->ptr = map;
+
+       /* Initialize buffer in system memory. */
+       for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+               ptr[i] = i;
+
+       /* Migrate memory to device but force a THP allocation error. */
+       ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_FLAGS, buffer,
+                             HMM_DMIRROR_FLAG_FAIL_ALLOC);
+       ASSERT_EQ(ret, 0);
+       ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+       ASSERT_EQ(ret, 0);
+       ASSERT_EQ(buffer->cpages, npages);
+
+       /* Check what the device read. */
+       for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) {
+               ASSERT_EQ(ptr[i], i);
+               if (ptr[i] != i)
+                       break;
+       }
+
+       /* Try faulting back a single (PAGE_SIZE) page. */
+       ptr = buffer->ptr;
+       ASSERT_EQ(ptr[2048], 2048);
+
+       /* unmap and remap the region to reset things. */
+       ret = munmap(old_ptr, 2 * size);
+       ASSERT_EQ(ret, 0);
+       old_ptr = mmap(NULL, 2 * size, PROT_READ | PROT_WRITE,
+                       MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0);
+       ASSERT_NE(old_ptr, MAP_FAILED);
+       map = (void *)ALIGN((uintptr_t)old_ptr, size);
+       ret = madvise(map, size, MADV_HUGEPAGE);
+       ASSERT_EQ(ret, 0);
+       buffer->ptr = map;
+
+       /* Initialize buffer in system memory. */
+       for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+               ptr[i] = i;
+
+       /* Migrate THP to device. */
+       ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+       ASSERT_EQ(ret, 0);
+       ASSERT_EQ(buffer->cpages, npages);
+
+       /*
+        * Force an allocation error when faulting back a THP resident in the
+        * device.
+        */
+       ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_FLAGS, buffer,
+                             HMM_DMIRROR_FLAG_FAIL_ALLOC);
+       ASSERT_EQ(ret, 0);
+
+       ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages);
+       ASSERT_EQ(ret, 0);
+       ptr = buffer->ptr;
+       ASSERT_EQ(ptr[2048], 2048);
+
+       buffer->ptr = old_ptr;
+       hmm_buffer_free(buffer);
+}
+
+/*
+ * Migrate private anonymous huge zero page with allocation errors.
+ */
+TEST_F(hmm, migrate_anon_huge_zero_err)
+{
+       struct hmm_buffer *buffer;
+       unsigned long npages;
+       unsigned long size;
+       unsigned long i;
+       void *old_ptr;
+       void *map;
+       int *ptr;
+       int ret;
+
+       size = TWOMEG;
+
+       buffer = malloc(sizeof(*buffer));
+       ASSERT_NE(buffer, NULL);
+
+       buffer->fd = -1;
+       buffer->size = 2 * size;
+       buffer->mirror = malloc(2 * size);
+       ASSERT_NE(buffer->mirror, NULL);
+       memset(buffer->mirror, 0xFF, 2 * size);
+
+       old_ptr = mmap(NULL, 2 * size, PROT_READ,
+                       MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0);
+       ASSERT_NE(old_ptr, MAP_FAILED);
+
+       npages = size >> self->page_shift;
+       map = (void *)ALIGN((uintptr_t)old_ptr, size);
+       ret = madvise(map, size, MADV_HUGEPAGE);
+       ASSERT_EQ(ret, 0);
+       buffer->ptr = map;
+
+       /* Migrate memory to device but force a THP allocation error. */
+       ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_FLAGS, buffer,
+                             HMM_DMIRROR_FLAG_FAIL_ALLOC);
+       ASSERT_EQ(ret, 0);
+       ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+       ASSERT_EQ(ret, 0);
+       ASSERT_EQ(buffer->cpages, npages);
+
+       /* Check what the device read. */
+       for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+               ASSERT_EQ(ptr[i], 0);
+
+       /* Try faulting back a single (PAGE_SIZE) page. */
+       ptr = buffer->ptr;
+       ASSERT_EQ(ptr[2048], 0);
+
+       /* unmap and remap the region to reset things. */
+       ret = munmap(old_ptr, 2 * size);
+       ASSERT_EQ(ret, 0);
+       old_ptr = mmap(NULL, 2 * size, PROT_READ,
+                       MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0);
+       ASSERT_NE(old_ptr, MAP_FAILED);
+       map = (void *)ALIGN((uintptr_t)old_ptr, size);
+       ret = madvise(map, size, MADV_HUGEPAGE);
+       ASSERT_EQ(ret, 0);
+       buffer->ptr = map;
+
+       /* Initialize buffer in system memory (zero THP page). */
+       ret = ptr[0];
+       ASSERT_EQ(ret, 0);
+
+       /* Migrate memory to device but force a THP allocation error. */
+       ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_FLAGS, buffer,
+                             HMM_DMIRROR_FLAG_FAIL_ALLOC);
+       ASSERT_EQ(ret, 0);
+       ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+       ASSERT_EQ(ret, 0);
+       ASSERT_EQ(buffer->cpages, npages);
+
+       /* Fault the device memory back and check it. */
+       for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+               ASSERT_EQ(ptr[i], 0);
+
+       buffer->ptr = old_ptr;
+       hmm_buffer_free(buffer);
+}
 TEST_HARNESS_MAIN
-- 
2.50.1

Reply via email to