This reduces the added entry/exit overhead to roughly 30 cycles: ktime_get()
is now called only once an injectable timer is actually pending, and
kvm_vcpu_arm_exit() returns early if the exit hrtimer is already armed.
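
For reference, cycle counts like this are typically obtained by amortizing
a TSC delta over many iterations. A minimal userspace sketch of that kind
of measurement (measured_path() is a hypothetical stand-in for the code
path under test, not part of this patch):

#include <stdio.h>
#include <stdint.h>

/* Read the x86 time-stamp counter. */
static inline uint64_t rdtsc(void)
{
	uint32_t lo, hi;

	__asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

/* Hypothetical stand-in for the path being measured. */
static void measured_path(void)
{
	__asm__ __volatile__("" ::: "memory");
}

int main(void)
{
	enum { ITERS = 1000000 };
	uint64_t start, end;
	int i;

	start = rdtsc();
	for (i = 0; i < ITERS; i++)
		measured_path();
	end = rdtsc();

	/*
	 * Loop overhead is included; amortizing over ITERS keeps the
	 * per-iteration noise small. rdtsc is not serializing, so
	 * precise numbers would need lfence/cpuid fencing.
	 */
	printf("~%llu cycles/iteration\n",
	       (unsigned long long)((end - start) / ITERS));
	return 0;
}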

Signed-off-by: Marcelo Tosatti <mtosa...@redhat.com>

Index: kvm-new/arch/x86/kvm/timer.c
===================================================================
--- kvm-new.orig/arch/x86/kvm/timer.c
+++ kvm-new/arch/x86/kvm/timer.c
@@ -135,14 +135,15 @@ ktime_t kvm_vcpu_next_timer_event(struct
 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
 {
        struct kvm_timer *ktimer, *n;
-       ktime_t now = ktime_get();
 
        list_for_each_entry_safe(ktimer, n, &vcpu->arch.timers, vcpu_timer) {
-               ktime_t expire;
+               ktime_t expire, now;
 
                if (!ktimer->can_inject)
                        continue;
 
+               now = ktime_get();
+
                expire = kvm_timer_next_event(ktimer);
                if (ktime_to_ns(now) < ktime_to_ns(expire))
                        continue;
@@ -173,8 +174,12 @@ void kvm_vcpu_arm_exit(struct kvm_vcpu *
 {
        ktime_t expire;
        ktime_t now;
-       struct kvm_timer *ktimer = kvm_vcpu_injectable_timer_event(vcpu);
+       struct kvm_timer *ktimer;
+
+       if (hrtimer_active(&vcpu->arch.exit_timer))
+               return;
 
+       ktimer = kvm_vcpu_injectable_timer_event(vcpu);
        if (!ktimer)
                return;
 
Index: kvm-new/arch/x86/kvm/x86.c
===================================================================
--- kvm-new.orig/arch/x86/kvm/x86.c
+++ kvm-new/arch/x86/kvm/x86.c
@@ -3567,8 +3567,6 @@ static int vcpu_enter_guest(struct kvm_v
 
        preempt_enable();
 
-       kvm_vcpu_cleanup_timer(vcpu);
-
        down_read(&vcpu->kvm->slots_lock);
 
        /*
