In Hyper-V's kvm_hv_flush_tlb(), check targeted vCPUs and store them in a
bitmap before flushing their TLBs. In a subsequent commit, remote TLB
flushes may need to be aborted, so allow checking for that before starting
to enqueue the flushes.

No functional change intended.

Signed-off-by: Nikolas Wipper <nik...@amazon.de>
---
 arch/x86/kvm/hyperv.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 6e7941ed25ae..e68fbc0c7fc1 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -2134,26 +2134,21 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
         * analyze it here, flush TLB regardless of the specified address space.
         */
        if (all_cpus && !is_guest_mode(vcpu)) {
+               bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
+
                kvm_for_each_vcpu(i, v, kvm) {
-                       tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false);
-                       hv_tlb_flush_enqueue(v, tlb_flush_fifo,
-                                            tlb_flush_entries, hc->rep_cnt);
+                       __set_bit(i, vcpu_mask);
                }
-
-               kvm_make_all_cpus_request(kvm, KVM_REQ_HV_TLB_FLUSH);
        } else if (!is_guest_mode(vcpu)) {
                sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);
 
                for_each_set_bit(i, vcpu_mask, KVM_MAX_VCPUS) {
                        v = kvm_get_vcpu(kvm, i);
-                       if (!v)
+                       if (!v) {
+                               __clear_bit(i, vcpu_mask);
                                continue;
-                       tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false);
-                       hv_tlb_flush_enqueue(v, tlb_flush_fifo,
-                                            tlb_flush_entries, hc->rep_cnt);
+                       }
                }
-
-               kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
        } else {
                struct kvm_vcpu_hv *hv_v;
 
@@ -2181,14 +2176,19 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
                                continue;
 
                        __set_bit(i, vcpu_mask);
-                       tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, true);
-                       hv_tlb_flush_enqueue(v, tlb_flush_fifo,
-                                            tlb_flush_entries, hc->rep_cnt);
                }
+       }
 
-               kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
+       for_each_set_bit(i, vcpu_mask, KVM_MAX_VCPUS) {
+               v = kvm_get_vcpu(kvm, i);
+
+               tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, is_guest_mode(vcpu));
+               hv_tlb_flush_enqueue(v, tlb_flush_fifo,
+                                    tlb_flush_entries, hc->rep_cnt);
        }
 
+       kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
+
 ret_success:
        /* We always do full TLB flush, set 'Reps completed' = 'Rep Count' */
        return (u64)HV_STATUS_SUCCESS |
-- 
2.40.1




Amazon Web Services Development Center Germany GmbH
Krausenstr. 38
10117 Berlin
Geschaeftsfuehrung: Christian Schlaeger, Jonathan Weiss
Eingetragen am Amtsgericht Charlottenburg unter HRB 257764 B
Sitz: Berlin
Ust-ID: DE 365 538 597


Reply via email to