On Thu, May 07, 2026 at 07:09:13PM +0000, Michael Kelley wrote:
> From: Stanislav Kinsburskii <[email protected]>
> > 
> > Clean up hv_do_map_gpa_hcall() and hv_call_unmap_gpa_pages() after the
> > preceding bug-fix patches:
> > 
> > Move "done += completed" before the status checks so that pages mapped
> > by a partially-successful batch are included in the error cleanup unmap.
> > Previously these mappings were leaked on failure.
> > 
> > While here, improve type safety and readability:
> >  - Change "int done" to "u64 done" to match the u64 page_count it is
> >    compared against, avoiding signed/unsigned comparison hazards.
> >  - Use u64 for loop iteration and batch size variables consistently.
> >  - Add proper braces to the for-loop body in hv_do_map_gpa_hcall().
> >  - Remove unnecessary "ret" variable from hv_call_unmap_gpa_pages().
> >  - Simplify the error-path unmap to use "done << large_shift" directly
> >    instead of mutating done in place.
> > 
> > v3: aligned changes to 80 columns
> > v2: replaced min with min_t
> > 
> > Fixes: 621191d709b14 ("Drivers: hv: Introduce mshv_root module to expose 
> > /dev/mshv to VMMs")
> > Signed-off-by: Stanislav Kinsburskii <[email protected]>
> 
> Question about "packaging" of this patch. To apply cleanly, it
> needs the previous two fixes applied.  As such, shouldn't it be
> the 3rd patch in patch set that includes the other two?
> 
> Also, there are changes in the previous two fixes that get undone
> or changed by this patch (such as applying large_shift in the error
> path of hv_do_map_gpa_hcall(). With a little more coordination
> between the three patches, there could be less code churn and
> the patches would overall be smaller.
> 

Indeed. I should have done it as a series, but I didn't.
So, to avoid any additional churn with the rest of the patches coming on
top (a series of fixes), I'd rather keep this one individual.

Thanks,
Stanislav

> Michael
> 
> > ---
> >  drivers/hv/mshv_root_hv_call.c |   56 
> > +++++++++++++++-------------------------
> >  1 file changed, 21 insertions(+), 35 deletions(-)
> > 
> > diff --git a/drivers/hv/mshv_root_hv_call.c b/drivers/hv/mshv_root_hv_call.c
> > index e5992c324904a..e1f9e28d5a19b 100644
> > --- a/drivers/hv/mshv_root_hv_call.c
> > +++ b/drivers/hv/mshv_root_hv_call.c
> > @@ -195,8 +195,8 @@ static int hv_do_map_gpa_hcall(u64 partition_id, u64 
> > gfn, u64
> > page_struct_count,
> >     struct hv_input_map_gpa_pages *input_page;
> >     u64 status, *pfnlist;
> >     unsigned long irq_flags, large_shift = 0;
> > -   int ret = 0, done = 0;
> > -   u64 page_count = page_struct_count;
> > +   u64 done = 0, page_count = page_struct_count;
> > +   int ret = 0;
> > 
> >     if (page_count == 0 || (pages && mmio_spa))
> >             return -EINVAL;
> > @@ -213,8 +213,8 @@ static int hv_do_map_gpa_hcall(u64 partition_id, u64 
> > gfn, u64
> > page_struct_count,
> >     }
> > 
> >     while (done < page_count) {
> > -           ulong i, completed, remain = page_count - done;
> > -           int rep_count = min(remain, HV_MAP_GPA_BATCH_SIZE);
> > +           u64 i, completed, remain = page_count - done;
> > +           u64 rep_count = min_t(u64, remain, HV_MAP_GPA_BATCH_SIZE);
> > 
> >             local_irq_save(irq_flags);
> >             input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
> > @@ -224,23 +224,14 @@ static int hv_do_map_gpa_hcall(u64 partition_id, u64 
> > gfn, u64
> > page_struct_count,
> >             input_page->map_flags = flags;
> >             pfnlist = input_page->source_gpa_page_list;
> > 
> > -           for (i = 0; i < rep_count; i++)
> > -                   if (flags & HV_MAP_GPA_NO_ACCESS) {
> > +           for (i = 0; i < rep_count; i++) {
> > +                   if (flags & HV_MAP_GPA_NO_ACCESS)
> >                             pfnlist[i] = 0;
> > -                   } else if (pages) {
> > -                           u64 index = (done + i) << large_shift;
> > -
> > -                           if (index >= page_struct_count) {
> > -                                   ret = -EINVAL;
> > -                                   break;
> > -                           }
> > -                           pfnlist[i] = page_to_pfn(pages[index]);
> > -                   } else {
> > +                   else if (pages)
> > +                           pfnlist[i] = page_to_pfn(pages[(done + i) <<
> > +                                                    large_shift]);
> > +                   else
> >                             pfnlist[i] = mmio_spa + done + i;
> > -                   }
> > -           if (ret) {
> > -                   local_irq_restore(irq_flags);
> > -                   break;
> >             }
> > 
> >             status = hv_do_rep_hypercall(HVCALL_MAP_GPA_PAGES, rep_count, 0,
> > @@ -248,29 +239,26 @@ static int hv_do_map_gpa_hcall(u64 partition_id, u64 
> > gfn, u64
> > page_struct_count,
> >             local_irq_restore(irq_flags);
> > 
> >             completed = hv_repcomp(status);
> > +           done += completed;
> > 
> >             if (hv_result_needs_memory(status)) {
> >                     ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
> >                                                 HV_MAP_GPA_DEPOSIT_PAGES);
> >                     if (ret)
> >                             break;
> > -
> >             } else if (!hv_result_success(status)) {
> >                     ret = hv_result_to_errno(status);
> >                     break;
> >             }
> > -
> > -           done += completed;
> >     }
> > 
> >     if (ret && done) {
> >             u32 unmap_flags = 0;
> > 
> > -           if (flags & HV_MAP_GPA_LARGE_PAGE) {
> > +           if (flags & HV_MAP_GPA_LARGE_PAGE)
> >                     unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
> > -                   done <<= large_shift;
> > -           }
> > -           hv_call_unmap_gpa_pages(partition_id, gfn, done, unmap_flags);
> > +           hv_call_unmap_gpa_pages(partition_id, gfn,
> > +                                   done << large_shift, unmap_flags);
> >     }
> > 
> >     return ret;
> > @@ -305,7 +293,7 @@ int hv_call_unmap_gpa_pages(u64 partition_id, u64 gfn, 
> > u64
> > page_count_4k,
> >     struct hv_input_unmap_gpa_pages *input_page;
> >     u64 status, page_count = page_count_4k;
> >     unsigned long irq_flags, large_shift = 0;
> > -   int ret = 0, done = 0;
> > +   u64 done = 0;
> > 
> >     if (page_count == 0)
> >             return -EINVAL;
> > @@ -319,8 +307,8 @@ int hv_call_unmap_gpa_pages(u64 partition_id, u64 gfn, 
> > u64
> > page_count_4k,
> >     }
> > 
> >     while (done < page_count) {
> > -           ulong completed, remain = page_count - done;
> > -           int rep_count = min(remain, HV_UMAP_GPA_PAGES);
> > +           u64 completed, remain = page_count - done;
> > +           u64 rep_count = min_t(u64, remain, HV_UMAP_GPA_PAGES);
> > 
> >             local_irq_save(irq_flags);
> >             input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
> > @@ -333,15 +321,13 @@ int hv_call_unmap_gpa_pages(u64 partition_id, u64 
> > gfn, u64
> > page_count_4k,
> >             local_irq_restore(irq_flags);
> > 
> >             completed = hv_repcomp(status);
> > -           if (!hv_result_success(status)) {
> > -                   ret = hv_result_to_errno(status);
> > -                   break;
> > -           }
> > -
> >             done += completed;
> > +
> > +           if (!hv_result_success(status))
> > +                   return hv_result_to_errno(status);
> >     }
> > 
> > -   return ret;
> > +   return 0;
> >  }
> > 
> >  int hv_call_get_gpa_access_states(u64 partition_id, u32 count, u64 
> > gpa_base_pfn,
> > 
> > 
> 

Reply via email to