The current code requires passing both the full pfn array and an offset
parameter to region processing functions, forcing callees to manually
index into arrays. This approach is inflexible and makes it difficult
to work with different sources of pfn arrays.

Upcoming changes will need to pass pfn arrays obtained from the HMM
framework directly to these functions. The HMM framework returns arrays
that represent specific ranges rather than full region arrays with
offsets, making the current offset-based indexing pattern incompatible.

Refactor by having callers pass pre-offset pointers to pfn arrays and
removing offset-based indexing from callees. This allows functions to
work with any pfn array starting at index 0, regardless of its source,
and prepares the code for HMM integration.

No functional change intended.

Signed-off-by: Stanislav Kinsburskii <[email protected]>
---
 drivers/hv/mshv_regions.c |   41 +++++++++++++++++++----------------------
 1 file changed, 19 insertions(+), 22 deletions(-)

diff --git a/drivers/hv/mshv_regions.c b/drivers/hv/mshv_regions.c
index 87204b2b48290..e20db61e9829f 100644
--- a/drivers/hv/mshv_regions.c
+++ b/drivers/hv/mshv_regions.c
@@ -99,14 +99,13 @@ static int mshv_chunk_stride(unsigned long pfn, u64 gfn, 
u64 pfn_count)
 
 /**
  * mshv_region_chunk_size - Length of the next contiguous PFN run in a region.
- * @region    : Memory region whose PFN array is being walked.
- * @pfn_offset: Offset into region->mreg_pfns at which to start.
- * @pfn_count : Upper bound on the run length.
- * @pfns      : Pointer to an array of PFNs corresponding to the region.
- * @huge_page : Out-parameter set to true if the run may be dispatched
+ * @gfn      : GFN corresponding to the start of the PFN run.
+ * @pfn_count: Upper bound on the run length.
+ * @pfns     : PFN array starting at the chunk's first PFN.
+ * @huge_page: Out-parameter set to true if the run may be dispatched
  *              as a 2 MiB chunk; false for 4 KiB-stride dispatch.
  *
- * Returns the length of the longest contiguous run starting at @pfn_offset
+ * Returns the length of the longest contiguous run starting at @pfns[0]
  * that shares the classification of the first PFN: either a same-stride run of
  * valid PFNs (4 KiB or 2 MiB) or a hole of invalid PFNs. A hole that is
  * huge-page aligned in @gfn space and at least PTRS_PER_PMD entries long is
@@ -117,15 +116,11 @@ static int mshv_chunk_stride(unsigned long pfn, u64 gfn, 
u64 pfn_count)
  * Return: Length of the run in PFNs, or a negative errno from
  *         mshv_chunk_stride() if the backing folio order is unsupported.
  */
-static long mshv_region_chunk_size(struct mshv_region *region,
-                                  u64 pfn_offset, u64 pfn_count,
+static long mshv_region_chunk_size(u64 gfn, u64 pfn_count,
                                   unsigned long *pfns, bool *huge_page)
 {
-       u64 gfn = region->start_gfn + pfn_offset;
        u64 count = 0, stride;
 
-       pfns += pfn_offset;
-
        if (!mshv_pfn_valid(pfns[0])) {
                for (count = 1; count < pfn_count; count++) {
                        if (mshv_pfn_valid(pfns[count]))
@@ -162,7 +157,7 @@ static long mshv_region_chunk_size(struct mshv_region 
*region,
  * mshv_region_process_range - Processes a range of PFNs in a region.
  * @region    : Pointer to the memory region structure.
  * @flags     : Flags to pass to the handler.
- * @pfn_offset: Offset into the region's PFNs array to start processing.
+ * @pfn_offset: Offset into the region's PFN array to start processing.
  * @pfn_count : Number of PFNs to process.
  * @pfns      : Pointer to an array of PFNs corresponding to the region.
  * @handler   : Callback function to handle each chunk of contiguous
@@ -183,6 +178,7 @@ static int mshv_region_process_range(struct mshv_region 
*region,
                                     unsigned long *pfns,
                                     pfn_handler_t handler)
 {
+       u64 gfn = region->start_gfn + pfn_offset;
        u64 end;
        long ret;
 
@@ -196,7 +192,7 @@ static int mshv_region_process_range(struct mshv_region 
*region,
                bool huge_page;
                long count;
 
-               count = mshv_region_chunk_size(region, pfn_offset, pfn_count,
+               count = mshv_region_chunk_size(gfn, pfn_count,
                                               pfns, &huge_page);
                if (count < 0)
                        return count;
@@ -208,6 +204,8 @@ static int mshv_region_process_range(struct mshv_region 
*region,
 
                pfn_offset += count;
                pfn_count -= count;
+               pfns += count;
+               gfn += count;
        }
 
        return 0;
@@ -274,15 +272,14 @@ static int mshv_region_chunk_share(struct mshv_region 
*region,
                                   unsigned long *pfns,
                                   bool huge_page)
 {
-       if (!mshv_pfn_valid(pfns[pfn_offset]))
+       if (!mshv_pfn_valid(pfns[0]))
                return -EINVAL;
 
        if (huge_page)
                flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
 
        return hv_call_modify_spa_host_access(region->partition->pt_id,
-                                             pfns + pfn_offset,
-                                             pfn_count,
+                                             pfns, pfn_count,
                                              HV_MAP_GPA_READABLE |
                                              HV_MAP_GPA_WRITABLE,
                                              flags, true);
@@ -304,15 +301,15 @@ static int mshv_region_chunk_unshare(struct mshv_region 
*region,
                                     unsigned long *pfns,
                                     bool huge_page)
 {
-       if (!mshv_pfn_valid(pfns[pfn_offset]))
+       if (!mshv_pfn_valid(pfns[0]))
                return -EINVAL;
 
        if (huge_page)
                flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
 
        return hv_call_modify_spa_host_access(region->partition->pt_id,
-                                             pfns + pfn_offset,
-                                             pfn_count, 0,
+                                             pfns, pfn_count,
+                                             0,
                                              flags, false);
 }
 
@@ -337,7 +334,7 @@ static int mshv_region_chunk_remap(struct mshv_region 
*region,
         * hypervisor track dirty pages, enabling precopy live
         * migration.
         */
-       if (!mshv_pfn_valid(pfns[pfn_offset]))
+       if (!mshv_pfn_valid(pfns[0]))
                flags = HV_MAP_GPA_NO_ACCESS;
 
        if (huge_page)
@@ -346,7 +343,7 @@ static int mshv_region_chunk_remap(struct mshv_region 
*region,
        return hv_call_map_ram_pfns(region->partition->pt_id,
                                    region->start_gfn + pfn_offset,
                                    pfn_count, flags,
-                                   pfns + pfn_offset);
+                                   pfns);
 }
 
 static int mshv_region_remap_pfns(struct mshv_region *region,
@@ -682,7 +679,7 @@ static int mshv_region_collect_and_map(struct mshv_region 
*region,
 
        ret = mshv_region_remap_pfns(region, region->hv_map_flags,
                                     pfn_offset, pfn_count,
-                                    region->mreg_pfns);
+                                    region->mreg_pfns + pfn_offset);
 
        mutex_unlock(&region->mreg_mutex);
 out:



Reply via email to