On Mon, Apr 08, 2013 at 10:17:49PM +0800, Yang Zhang wrote:
> From: Yang Zhang <yang.z.zh...@intel.com>
> 
> The current interrupt coalescing logic, which is only used by RTC,
> conflicts with Posted Interrupt.
> This patch introduces a new mechanism that uses EOI to track interrupts:
> when delivering an interrupt to a vcpu, pending_eoi is set to the number
> of vcpus that received the interrupt, and it is decremented as each vcpu
> writes EOI. No subsequent RTC interrupt can be delivered until all of
> those vcpus have written EOI.
> 
> Signed-off-by: Yang Zhang <yang.z.zh...@intel.com>
> ---
>  virt/kvm/ioapic.c |   41 ++++++++++++++++++++++++++++++++++++++++-
>  1 files changed, 40 insertions(+), 1 deletions(-)
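
For context, the rtc_status bookkeeping this builds on was added earlier
in the series; reconstructed from the fields used below (so the exact
layout may differ), it is roughly:

 /* virt/kvm/ioapic.h; DECLARE_BITMAP/KVM_MAX_VCPUS come from the usual kernel headers */
 struct rtc_status {
         int pending_eoi;                         /* vcpus that still owe an EOI */
         DECLARE_BITMAP(dest_map, KVM_MAX_VCPUS); /* vcpus the RTC vector went to */
 };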
> 
> diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
> index 8d1f662..197ef97 100644
> --- a/virt/kvm/ioapic.c
> +++ b/virt/kvm/ioapic.c
> @@ -149,6 +149,29 @@ static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
>           __rtc_irq_eoi_tracking_restore_one(vcpu, vector);
>  }
>  
> +static void rtc_irq_ack_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu,
> +                     int irq)
Call it rtc_irq_eoi() and drop the "ack". We sometimes call EOI an ack,
but putting both ack and eoi in the name is too much.

> +{
> +     if (irq != RTC_GSI)
> +             return;
> +
Let's move the check to the caller:
 if (i == RTC_GSI)
    rtc_irq_eoi()
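
Putting the rename and the hoisted check together, roughly (just a
sketch of what I mean, untested):

 static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
 {
         if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map))
                 --ioapic->rtc_status.pending_eoi;

         WARN_ON(ioapic->rtc_status.pending_eoi < 0);
 }

and in __kvm_ioapic_update_eoi(), instead of the unconditional call:

         if (i == RTC_GSI)
                 rtc_irq_eoi(ioapic, vcpu);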

> +     if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map))
> +             --ioapic->rtc_status.pending_eoi;
> +
> +     WARN_ON(ioapic->rtc_status.pending_eoi < 0);
> +}
> +
> +static bool rtc_irq_check(struct kvm_ioapic *ioapic, int irq, bool line_status)
rtc_irq_check_coalesced() would be a better name.

> +{
> +     if (irq != RTC_GSI || !line_status)
> +             return false;
> +
> +     if (ioapic->rtc_status.pending_eoi > 0)
> +             return true; /* coalesced */
> +
> +     return false;
> +}
> +
>  static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx,
>               bool line_status)
>  {
> @@ -262,6 +285,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status)
>  {
>       union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
>       struct kvm_lapic_irq irqe;
> +     int ret;
>  
>       ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
>                    "vector=%x trig_mode=%x\n",
> @@ -277,7 +301,15 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status)
>       irqe.level = 1;
>       irqe.shorthand = 0;
>  
> -     return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
> +     if (irq == RTC_GSI && line_status) {
> +             BUG_ON(ioapic->rtc_status.pending_eoi != 0);
> +             ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
> +                             ioapic->rtc_status.dest_map);
> +             ioapic->rtc_status.pending_eoi = ret;
> +     } else
> +             ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
> +
> +     return ret;
>  }
>  
>  int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
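
If I read this right, the invariant after delivery is that pending_eoi
equals the number of bits set in dest_map. A debug check along these
lines (not part of the patch, purely illustrative) should hold right
after the kvm_irq_delivery_to_apic() call, assuming at least one
destination matched (otherwise the return value can go negative, IIRC):

         /* hypothetical sanity check, not in the patch */
         WARN_ON(ioapic->rtc_status.pending_eoi !=
                 bitmap_weight(ioapic->rtc_status.dest_map, KVM_MAX_VCPUS));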
> @@ -301,6 +333,11 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
>               ret = 1;
>       } else {
>               int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
> +
> +             if (rtc_irq_check(ioapic, irq, line_status)) {
> +                     ret = 0; /* coalesced */
> +                     goto out;
> +             }
>               ioapic->irr |= mask;
>               if ((edge && old_irr != ioapic->irr) ||
>                   (!edge && !entry.fields.remote_irr))
> @@ -308,6 +345,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
>               else
>                       ret = 0; /* report coalesced interrupt */
>       }
> +out:
>       trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
>       spin_unlock(&ioapic->lock);
>  
> @@ -335,6 +373,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
>               if (ent->fields.vector != vector)
>                       continue;
>  
> +             rtc_irq_ack_eoi(ioapic, vcpu, i);
>               /*
>                * We are dropping lock while calling ack notifiers because ack
>                * notifier callbacks for assigned devices call into IOAPIC
> -- 
> 1.7.1

--
                        Gleb.