Region unmapping and no-access remapping do not require per-PFN
iteration: every GFN in a region is guaranteed to be mapped (valid
PFNs with access permissions, holes as no-access), so ranges can be
processed in bulk.
Split GFN ranges into large-page-aligned chunks and use the
HV_MAP_GPA_LARGE_PAGE / HV_UNMAP_GPA_LARGE_PAGE flags for the aligned
portions. This eliminates PFN traversal and reduces hypercalls for
large regions.

Signed-off-by: Stanislav Kinsburskii <[email protected]>
---
 drivers/hv/mshv_regions.c | 77 +++++++++++++++++++++++++++++++++++++--------
 1 file changed, 64 insertions(+), 13 deletions(-)

diff --git a/drivers/hv/mshv_regions.c b/drivers/hv/mshv_regions.c
index f81951ae3f808..cb42ee49c2e2f 100644
--- a/drivers/hv/mshv_regions.c
+++ b/drivers/hv/mshv_regions.c
@@ -20,11 +20,17 @@
 /* Process memory regions in chunks to avoid soft lockups and livelock */
 #define MSHV_MAX_PFN_BATCH	(SZ_2M / PAGE_SIZE)
 
+/* Hypervisor base pages per large page (2 MiB / 4 KiB) */
+#define HV_PFNS_PER_LARGE_PAGE	(SZ_2M / HV_HYP_PAGE_SIZE)
+
 #define MSHV_MAP_FAULT_IN_PAGES \
 	(PTRS_PER_PMD * max_t(unsigned long, 1, PAGE_SIZE / HV_HYP_PAGE_SIZE))
 
 #define MSHV_INVALID_PFN	ULONG_MAX
 
+typedef int (*gfn_handler_t)(struct mshv_region *region,
+			     u64 gfn, u64 count, u32 flags);
+
 static const struct mmu_interval_notifier_ops mshv_region_mni_ops;
 
 static inline bool mshv_pfn_valid(unsigned long pfn)
@@ -426,24 +432,54 @@ static int mshv_region_pin(struct mshv_region *region)
 	return ret < 0 ? ret : -ENOMEM;
 }
 
-static int mshv_region_chunk_unmap(struct mshv_region *region,
-				   u32 flags,
-				   u64 pfn_offset, u64 pfn_count,
-				   bool huge_page)
+/*
+ * Split a GFN range into head (unaligned), large-page-aligned middle,
+ * and tail, invoking @fn for each non-empty piece.
+ */
+static int mshv_region_for_each_gfn_chunk(struct mshv_region *region,
+					  u64 gfn, u64 nr_pfns,
+					  u32 base_flags, u32 large_flag,
+					  gfn_handler_t fn)
 {
-	if (huge_page)
-		flags |= HV_UNMAP_GPA_LARGE_PAGE;
+	u64 head, aligned, tail;
+	int ret;
+
+	head = min(ALIGN(gfn, HV_PFNS_PER_LARGE_PAGE) - gfn, nr_pfns);
+	aligned = ALIGN_DOWN(nr_pfns - head, HV_PFNS_PER_LARGE_PAGE);
+	tail = nr_pfns - head - aligned;
+
+	if (head) {
+		ret = fn(region, gfn, head, base_flags);
+		if (ret)
+			return ret;
+	}
+	if (aligned) {
+		ret = fn(region, gfn + head, aligned,
+			 base_flags | large_flag);
+		if (ret)
+			return ret;
+	}
+	if (tail) {
+		ret = fn(region, gfn + head + aligned, tail, base_flags);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
 
+static int mshv_unmap_gfns(struct mshv_region *region,
+			   u64 gfn, u64 count, u32 flags)
+{
 	return hv_call_unmap_pfns(region->partition->pt_id,
-				  region->start_gfn + pfn_offset,
-				  pfn_count, flags);
+				  gfn, count, flags);
 }
 
 static int mshv_region_unmap(struct mshv_region *region)
 {
-	return mshv_region_process_range(region, 0,
-					 0, region->nr_pfns,
-					 mshv_region_chunk_unmap);
+	return mshv_region_for_each_gfn_chunk(region, region->start_gfn,
+					      region->nr_pfns,
+					      0, HV_UNMAP_GPA_LARGE_PAGE,
+					      mshv_unmap_gfns);
 }
 
 static void mshv_region_destroy(struct kref *ref)
@@ -671,6 +707,22 @@ bool mshv_region_handle_gfn_fault(struct mshv_region *region, u64 gfn)
 	return !ret;
 }
 
+static int mshv_map_no_access_gfns(struct mshv_region *region,
+				   u64 gfn, u64 count, u32 flags)
+{
+	return hv_call_map_ram_pfns(region->partition->pt_id,
+				    gfn, count, flags, NULL);
+}
+
+static int mshv_region_map_no_access(struct mshv_region *region,
+				     u64 pfn_offset, u64 pfn_count)
+{
+	return mshv_region_for_each_gfn_chunk(region,
+			region->start_gfn + pfn_offset, pfn_count,
+			HV_MAP_GPA_NO_ACCESS, HV_MAP_GPA_LARGE_PAGE,
+			mshv_map_no_access_gfns);
+}
+
 /**
  * mshv_region_interval_invalidate - Invalidate a range of memory region
  * @mni: Pointer to the mmu_interval_notifier structure
@@ -714,8 +766,7 @@ static bool mshv_region_interval_invalidate(struct mmu_interval_notifier *mni,
 
 	mmu_interval_set_seq(mni, cur_seq);
 
-	ret = mshv_region_remap_pfns(region, HV_MAP_GPA_NO_ACCESS,
-				     pfn_offset, pfn_count);
+	ret = mshv_region_map_no_access(region, pfn_offset, pfn_count);
 	if (ret)
 		goto out_unlock;
 

