Consolidate memory region processing to handle both valid and invalid PFNs
uniformly. This eliminates code duplication across remap, unmap, share, and
unshare operations by using a common range processing interface.
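
All four operations now reduce to the same per-run walk, sketched below
(a sketch only: "handler" stands in for the per-operation chunk
callbacks such as mshv_region_chunk_remap(), and error handling is
abbreviated):

        while (pfn_count) {
                bool huge_page;
                long count;

                /* Classify the next run: 4 KiB/2 MiB stride or a hole. */
                count = mshv_region_chunk_size(region, pfn_offset,
                                               pfn_count, &huge_page);
                if (count < 0)
                        return count;

                /* Dispatch [pfn_offset, pfn_offset + count) in one call. */
                ret = handler(region, flags, pfn_offset, count, huge_page);
                if (ret)
                        return ret;

                pfn_offset += count;
                pfn_count -= count;
        }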

Holes are now remapped with no-access permissions to enable
hypervisor dirty page tracking for precopy live migration.

This refactoring is a precursor to an upcoming change that will map
present pages in movable regions upon region creation, requiring
consistent handling of both mapped and unmapped ranges.

Signed-off-by: Stanislav Kinsburskii <[email protected]>
---
 drivers/hv/mshv_regions.c |   70 ++++++++++++++++++++++++++++-----------------
 1 file changed, 43 insertions(+), 27 deletions(-)

diff --git a/drivers/hv/mshv_regions.c b/drivers/hv/mshv_regions.c
index 090c4052f0f4d..579a29f2924b8 100644
--- a/drivers/hv/mshv_regions.c
+++ b/drivers/hv/mshv_regions.c
@@ -81,30 +81,23 @@ static int mshv_chunk_stride(unsigned long pfn, u64 gfn, u64 pfn_count)
 }
 
 /**
- * mshv_region_chunk_size - Length of the next same-stride PFN run.
+ * mshv_region_chunk_size - Length of the next contiguous PFN run in a region.
  * @region    : Memory region whose PFN array is being walked.
- * @pfn_offset: Offset into region->mreg_pfns at which to start; the
- *              PFN at this offset must be valid.
- * @pfn_count : Upper bound on the run length (not necessarily the
- *              region's total length; typically the residual passed
- *              from mshv_region_process_range()).
- * @huge_page : Out-parameter set to true if the run is backed by
- *              PMD-order folios and may be dispatched as 2 MiB
- *              chunks; false for 4 KiB-stride dispatch.
+ * @pfn_offset: Offset into region->mreg_pfns at which to start.
+ * @pfn_count : Upper bound on the run length.
+ * @huge_page : Out-parameter set to true if the run may be dispatched
+ *              as a 2 MiB chunk; false for 4 KiB-stride dispatch.
  *
- * Walks the PFN array starting at @pfn_offset and returns the length
- * of the longest contiguous run that shares the stride classification
- * (4 KiB vs 2 MiB) of the first PFN.  An invalid PFN inside the run
- * terminates it.  The run is bounded above by @pfn_count.
- *
- * The caller may then dispatch [pfn_offset, pfn_offset + return) to a
- * handler with @huge_page indicating which stride applies.  After the
- * dispatch the caller advances by the returned length and re-invokes
- * this function for the next run.
+ * Returns the length of the longest contiguous run starting at @pfn_offset
+ * that shares the classification of the first PFN: either a same-stride run of
+ * valid PFNs (4 KiB or 2 MiB) or a hole of invalid PFNs. A hole that is
+ * huge-page aligned in GFN space and at least PTRS_PER_PMD entries long is
+ * reported as a 2 MiB chunk (huge_page = true) so the caller can dispatch it
+ * as a single HV_MAP_GPA_NO_ACCESS huge mapping. The run is bounded above by
+ * @pfn_count.
  *
  * Return: Length of the run in PFNs, or a negative errno from
- *         mshv_chunk_stride() if the starting PFN is invalid or its
- *         backing folio order is unsupported.
+ *         mshv_chunk_stride() if the backing folio order is unsupported.
  */
 static long mshv_region_chunk_size(struct mshv_mem_region *region,
                                   u64 pfn_offset, u64 pfn_count,
@@ -114,6 +107,22 @@ static long mshv_region_chunk_size(struct mshv_mem_region *region,
        u64 gfn = region->start_gfn + pfn_offset;
        u64 count = 0, stride;
 
+       if (!mshv_pfn_valid(pfns[0])) {
+               for (count = 1; count < pfn_count; count++) {
+                       if (mshv_pfn_valid(pfns[count]))
+                               break;
+               }
+
+               if (IS_ALIGNED(gfn, PTRS_PER_PMD) &&
+                   count >= PTRS_PER_PMD) {
+                       *huge_page = true;
+                       return ALIGN_DOWN(count, PTRS_PER_PMD);
+               }
+
+               *huge_page = false;
+               return count;
+       }
+
        stride = mshv_chunk_stride(pfns[0], gfn, pfn_count);
        if (stride < 0)
                return stride;
@@ -170,13 +179,6 @@ static int mshv_region_process_range(struct mshv_mem_region *region,
                bool huge_page;
                long count;
 
-               /* Skip non-present pages */
-               if (!mshv_pfn_valid(region->mreg_pfns[pfn_offset])) {
-                       pfn_offset++;
-                       pfn_count--;
-                       continue;
-               }
-
                count = mshv_region_chunk_size(region, pfn_offset, pfn_count,
                                               &huge_page);
                if (count < 0)
@@ -223,6 +225,9 @@ static int mshv_region_chunk_share(struct mshv_mem_region *region,
                                   u64 pfn_offset, u64 pfn_count,
                                   bool huge_page)
 {
+       if (!mshv_pfn_valid(region->mreg_pfns[pfn_offset]))
+               return -EINVAL;
+
        if (huge_page)
                flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
 
@@ -248,6 +253,9 @@ static int mshv_region_chunk_unshare(struct mshv_mem_region *region,
                                     u64 pfn_offset, u64 pfn_count,
                                     bool huge_page)
 {
+       if (!mshv_pfn_valid(region->mreg_pfns[pfn_offset]))
+               return -EINVAL;
+
        if (huge_page)
                flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
 
@@ -271,6 +279,14 @@ static int mshv_region_chunk_remap(struct mshv_mem_region *region,
                                   u64 pfn_offset, u64 pfn_count,
                                   bool huge_page)
 {
+       /*
+        * Remap missing pages with no access to let the
+        * hypervisor track dirty pages, enabling precopy live
+        * migration.
+        */
+       if (!mshv_pfn_valid(region->mreg_pfns[pfn_offset]))
+               flags = HV_MAP_GPA_NO_ACCESS;
+
        if (huge_page)
                flags |= HV_MAP_GPA_LARGE_PAGE;
 
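To make the hole classification concrete, here is a small userspace model
of the walk (illustrative only: mshv_pfn_valid() is stood in by a nonzero
check, PTRS_PER_PMD is hardcoded to the x86-64 value of 512, and the
4 KiB/2 MiB stride split for valid runs is reduced to a plain run):

        #include <stdbool.h>
        #include <stdio.h>

        #define PTRS_PER_PMD 512        /* x86-64 value, assumed here */

        /* Stand-in for mshv_pfn_valid(): treat PFN 0 as not present. */
        static bool pfn_valid_model(unsigned long pfn)
        {
                return pfn != 0;
        }

        /*
         * Length of the next run at 'off': either a hole of invalid
         * PFNs or a run of valid ones, mirroring the patched hole
         * handling in mshv_region_chunk_size().
         */
        static long chunk_size_model(const unsigned long *pfns,
                                     unsigned long gfn, unsigned long off,
                                     unsigned long count, bool *huge_page)
        {
                unsigned long n = 1;

                if (!pfn_valid_model(pfns[off])) {
                        while (n < count && !pfn_valid_model(pfns[off + n]))
                                n++;
                        /* Aligned holes of >= 512 PFNs map as 2 MiB chunks. */
                        if ((gfn + off) % PTRS_PER_PMD == 0 &&
                            n >= PTRS_PER_PMD) {
                                *huge_page = true;
                                return n - n % PTRS_PER_PMD;
                        }
                        *huge_page = false;
                        return n;
                }

                while (n < count && pfn_valid_model(pfns[off + n]))
                        n++;
                *huge_page = false;
                return n;
        }

        int main(void)
        {
                /* A 512-PFN hole at GFN 0, then four present pages. */
                unsigned long pfns[516] = { 0 };
                unsigned long off = 0, left = 516;
                bool huge;
                int i;

                for (i = 0; i < 4; i++)
                        pfns[512 + i] = 1000 + i;

                while (left) {
                        long n = chunk_size_model(pfns, 0, off, left, &huge);

                        printf("run at %lu: %ld PFNs, huge=%d\n",
                               off, n, huge);
                        off += n;
                        left -= n;
                }
                return 0;
        }

This prints one 512-PFN huge run for the hole (which the remap path would
issue as a single HV_MAP_GPA_NO_ACCESS 2 MiB mapping) followed by a 4-PFN
4 KiB run, rather than 516 per-page dispatches.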