Use a stride larger than one page (i.e. a huge page) only when both
the guest frame number (gfn) and the operation size (page_count) are
aligned to the huge page size (PTRS_PER_PMD). This matches the
hypervisor requirement that huge-page map/unmap operations be
guest-aligned and cover a full huge page.

Add mshv_chunk_stride() to encapsulate this alignment and page-order
validation, and plumb a huge_page flag into the region chunk handlers.
This prevents issuing large-page map/unmap/share operations that the
hypervisor would reject due to misaligned guest mappings.
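
For illustration only (not part of the change itself), the stride rule
can be sketched as a small standalone C program. It assumes the common
x86-64 value PTRS_PER_PMD == 512 (a 2M huge page made of 4K base
pages); the pick_stride() helper and HUGE_PAGES constant below are
hypothetical stand-ins, not names from the driver:

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define HUGE_PAGES 512UL                /* stands in for PTRS_PER_PMD */
  #define ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

  /*
   * Huge stride only for a huge folio with an aligned gfn and a size
   * covering whole huge pages; otherwise fall back to 4K pages.
   */
  static uint64_t pick_stride(bool huge_folio, uint64_t gfn, uint64_t page_count)
  {
          if (huge_folio && ALIGNED(gfn, HUGE_PAGES) &&
              ALIGNED(page_count, HUGE_PAGES))
                  return HUGE_PAGES;
          return 1;
  }

  int main(void)
  {
          /* 512: aligned gfn, size covers whole huge pages */
          printf("%llu\n", (unsigned long long)pick_stride(true, 0x200, 1024));
          /* 1: misaligned gfn, fall back to 4K pages */
          printf("%llu\n", (unsigned long long)pick_stride(true, 0x201, 1024));
          /* 1: size does not cover a full huge page */
          printf("%llu\n", (unsigned long long)pick_stride(true, 0x200, 100));
          return 0;
  }

The real helper additionally rejects folio orders other than 0 and
PMD_ORDER, since the hypervisor only supports 4K and 2M page sizes.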

Fixes: abceb4297bf8 ("mshv: Fix huge page handling in memory region traversal")
Signed-off-by: Stanislav Kinsburskii <[email protected]>
---
 drivers/hv/mshv_regions.c |   94 ++++++++++++++++++++++++++++++---------------
 1 file changed, 63 insertions(+), 31 deletions(-)

diff --git a/drivers/hv/mshv_regions.c b/drivers/hv/mshv_regions.c
index 30bacba6aec3..29776019bcde 100644
--- a/drivers/hv/mshv_regions.c
+++ b/drivers/hv/mshv_regions.c
@@ -19,6 +19,42 @@
 
 #define MSHV_MAP_FAULT_IN_PAGES                                PTRS_PER_PMD
 
+/**
+ * mshv_chunk_stride - Compute stride for mapping guest memory
+ * @page      : The page to check for huge page backing
+ * @gfn       : Guest frame number for the mapping
+ * @page_count: Total number of pages in the mapping
+ *
+ * Determines the appropriate stride (in pages) for mapping guest memory.
+ * Uses huge page stride if the backing page is huge and the guest mapping
+ * is properly aligned; otherwise falls back to single page stride.
+ *
+ * Return: Stride in pages, or -EINVAL if page order is unsupported.
+ */
+static int mshv_chunk_stride(struct page *page,
+                            u64 gfn, u64 page_count)
+{
+       unsigned int page_order;
+
+       page_order = folio_order(page_folio(page));
+       /* The hypervisor only supports 4K and 2M page sizes */
+       if (page_order && page_order != PMD_ORDER)
+               return -EINVAL;
+
+       /*
+        * Default to a single page stride. If page_order is set and both
+        * the guest frame number (gfn) and page_count are huge-page
+        * aligned (PTRS_PER_PMD), use a larger stride so the mapping can
+        * be backed by a huge page in both guest and hypervisor.
+        */
+       if (page_order &&
+           IS_ALIGNED(gfn, PTRS_PER_PMD) &&
+           IS_ALIGNED(page_count, PTRS_PER_PMD))
+               return 1 << page_order;
+
+       return 1;
+}
+
 /**
  * mshv_region_process_chunk - Processes a contiguous chunk of memory pages
  *                             in a region.
@@ -45,25 +81,23 @@ static long mshv_region_process_chunk(struct mshv_mem_region *region,
                                      int (*handler)(struct mshv_mem_region *region,
                                                     u32 flags,
                                                     u64 page_offset,
-                                                    u64 page_count))
+                                                    u64 page_count,
+                                                    bool huge_page))
 {
-       u64 count, stride;
-       unsigned int page_order;
+       u64 gfn = region->start_gfn + page_offset;
+       u64 count;
        struct page *page;
-       int ret;
+       int stride, ret;
 
        page = region->pages[page_offset];
        if (!page)
                return -EINVAL;
 
-       page_order = folio_order(page_folio(page));
-       /* The hypervisor only supports 4K and 2M page sizes */
-       if (page_order && page_order != PMD_ORDER)
-               return -EINVAL;
-
-       stride = 1 << page_order;
+       stride = mshv_chunk_stride(page, gfn, page_count);
+       if (stride < 0)
+               return stride;
 
-       /* Start at stride since the first page is validated */
+       /* Start at stride since the first stride is validated */
        for (count = stride; count < page_count; count += stride) {
                page = region->pages[page_offset + count];
 
@@ -71,12 +105,13 @@ static long mshv_region_process_chunk(struct mshv_mem_region *region,
                if (!page)
                        break;
 
-               /* Break if page size changes */
-               if (page_order != folio_order(page_folio(page)))
+               /* Break if stride size changes */
+               if (stride != mshv_chunk_stride(page, gfn + count,
+                                               page_count - count))
                        break;
        }
 
-       ret = handler(region, flags, page_offset, count);
+       ret = handler(region, flags, page_offset, count, stride > 1);
        if (ret)
                return ret;
 
@@ -108,7 +143,8 @@ static int mshv_region_process_range(struct mshv_mem_region *region,
                                     int (*handler)(struct mshv_mem_region *region,
                                                    u32 flags,
                                                    u64 page_offset,
-                                                   u64 page_count))
+                                                   u64 page_count,
+                                                   bool huge_page))
 {
        long ret;
 
@@ -162,11 +198,10 @@ struct mshv_mem_region *mshv_region_create(u64 guest_pfn, u64 nr_pages,
 
 static int mshv_region_chunk_share(struct mshv_mem_region *region,
                                   u32 flags,
-                                  u64 page_offset, u64 page_count)
+                                  u64 page_offset, u64 page_count,
+                                  bool huge_page)
 {
-       struct page *page = region->pages[page_offset];
-
-       if (PageHuge(page) || PageTransCompound(page))
+       if (huge_page)
                flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
 
        return hv_call_modify_spa_host_access(region->partition->pt_id,
@@ -188,11 +223,10 @@ int mshv_region_share(struct mshv_mem_region *region)
 
 static int mshv_region_chunk_unshare(struct mshv_mem_region *region,
                                     u32 flags,
-                                    u64 page_offset, u64 page_count)
+                                    u64 page_offset, u64 page_count,
+                                    bool huge_page)
 {
-       struct page *page = region->pages[page_offset];
-
-       if (PageHuge(page) || PageTransCompound(page))
+       if (huge_page)
                flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
 
        return hv_call_modify_spa_host_access(region->partition->pt_id,
@@ -212,11 +246,10 @@ int mshv_region_unshare(struct mshv_mem_region *region)
 
 static int mshv_region_chunk_remap(struct mshv_mem_region *region,
                                   u32 flags,
-                                  u64 page_offset, u64 page_count)
+                                  u64 page_offset, u64 page_count,
+                                  bool huge_page)
 {
-       struct page *page = region->pages[page_offset];
-
-       if (PageHuge(page) || PageTransCompound(page))
+       if (huge_page)
                flags |= HV_MAP_GPA_LARGE_PAGE;
 
        return hv_call_map_gpa_pages(region->partition->pt_id,
@@ -295,11 +328,10 @@ int mshv_region_pin(struct mshv_mem_region *region)
 
 static int mshv_region_chunk_unmap(struct mshv_mem_region *region,
                                   u32 flags,
-                                  u64 page_offset, u64 page_count)
+                                  u64 page_offset, u64 page_count,
+                                  bool huge_page)
 {
-       struct page *page = region->pages[page_offset];
-
-       if (PageHuge(page) || PageTransCompound(page))
+       if (huge_page)
                flags |= HV_UNMAP_GPA_LARGE_PAGE;
 
        return hv_call_unmap_gpa_pages(region->partition->pt_id,