The basic idea here is:
1. When vCPU's state is RUNSTATE_running,
        - set 'NV' to 'Notification Vector'.
        - Clear 'SN' to accept PI.
        - set 'NDST' to the right pCPU.
2. When vCPU's state is RUNSTATE_blocked,
        - set 'NV' to 'Wake-up Vector', so we can wake up the
          related vCPU when posted-interrupt happens for it.
        - Clear 'SN' to accept PI.
3. When vCPU's state is RUNSTATE_runnable/RUNSTATE_offline,
        - Set 'SN' to suppress non-urgent interrupts.
          (Currently, we only support non-urgent interrupts)
        - Set 'NV' back to 'Notification Vector' if needed.

Signed-off-by: Feng Wu <feng...@intel.com>
---
v3:
* Use write_atomic() to update 'NV' and 'NDST' fields.
* Use MASK_INSR() to get the value for 'NDST' field
* Add ASSERT_UNREACHABLE() for the break case in vmx_pi_desc_update()
* Remove pointless NULL assignment to 'vmx_function_table.pi_desc_update'
* Call hvm_funcs.pi_desc_update() in arch-specific files
* coding style

 xen/arch/x86/hvm/hvm.c             |   6 ++
 xen/arch/x86/hvm/vmx/vmx.c         | 122 +++++++++++++++++++++++++++++++++++++
 xen/common/schedule.c              |   4 ++
 xen/include/asm-arm/domain.h       |   2 +
 xen/include/asm-x86/hvm/hvm.h      |   2 +
 xen/include/asm-x86/hvm/vmx/vmcs.h |   7 +++
 xen/include/asm-x86/hvm/vmx/vmx.h  |  11 ++++
 7 files changed, 154 insertions(+)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 2736802..64ce381 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -6475,6 +6475,12 @@ enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
     return hvm_funcs.nhvm_intr_blocked(v);
 }
 
+void arch_pi_desc_update(struct vcpu *v, int old_state)
+{
+    if ( is_hvm_vcpu(v) && hvm_funcs.pi_desc_update )
+        hvm_funcs.pi_desc_update(v, old_state);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 5795afd..cf4f292 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -168,6 +168,7 @@ static int vmx_vcpu_initialise(struct vcpu *v)
 
     INIT_LIST_HEAD(&v->arch.hvm_vmx.pi_blocked_vcpu_list);
 
+    v->arch.hvm_vmx.pi_block_cpu = -1;
     return 0;
 }
 
@@ -1778,6 +1779,124 @@ static void vmx_handle_eoi(u8 vector)
     __vmwrite(GUEST_INTR_STATUS, status);
 }
 
+static void vmx_pi_desc_update(struct vcpu *v, int old_state)
+{
+    struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc;
+    struct pi_desc old, new;
+    unsigned long flags;
+
+    ASSERT(iommu_intpost);
+
+    switch ( v->runstate.state )
+    {
+    case RUNSTATE_runnable:
+    case RUNSTATE_offline:
+        /*
+         * We don't need to send notification event to a non-running
+         * vcpu, the interrupt information will be delivered to it before
+         * VM-ENTRY when the vcpu is scheduled to run next time.
+         */
+        pi_set_sn(pi_desc);
+
+        /*
+         * If the state is transferred from RUNSTATE_blocked,
+         * we should set 'NV' field back to posted_intr_vector,
+         * so the Posted-Interrupts can be delivered to the vCPU
+         * by VT-d HW after it is scheduled to run.
+         */
+        if ( old_state == RUNSTATE_blocked )
+        {
+            write_atomic((uint8_t*)&pi_desc->nv, posted_intr_vector);
+
+            /*
+             * Delete the vCPU from the related block list
+             * if we are resuming from blocked state
+             */
+            ASSERT(v->arch.hvm_vmx.pi_block_cpu != -1);
+            spin_lock_irqsave(&per_cpu(pi_blocked_vcpu_lock,
+                              v->arch.hvm_vmx.pi_block_cpu), flags);
+            list_del(&v->arch.hvm_vmx.pi_blocked_vcpu_list);
+            spin_unlock_irqrestore(&per_cpu(pi_blocked_vcpu_lock,
+                                   v->arch.hvm_vmx.pi_block_cpu), flags);
+            v->arch.hvm_vmx.pi_block_cpu = -1;
+        }
+        break;
+
+    case RUNSTATE_blocked:
+        ASSERT(v->arch.hvm_vmx.pi_block_cpu == -1);
+
+        /*
+         * The vCPU is blocked on the block list. Add the blocked
+         * vCPU on the list of the v->arch.hvm_vmx.pi_block_cpu,
+         * which is the destination of the wake-up notification event.
+         */
+        v->arch.hvm_vmx.pi_block_cpu = v->processor;
+        spin_lock_irqsave(&per_cpu(pi_blocked_vcpu_lock,
+                          v->arch.hvm_vmx.pi_block_cpu), flags);
+        list_add_tail(&v->arch.hvm_vmx.pi_blocked_vcpu_list,
+                      &per_cpu(pi_blocked_vcpu, v->arch.hvm_vmx.pi_block_cpu));
+        spin_unlock_irqrestore(&per_cpu(pi_blocked_vcpu_lock,
+                               v->arch.hvm_vmx.pi_block_cpu), flags);
+
+        do {
+            old.control = new.control = pi_desc->control;
+
+            /*
+             * We should not block the vCPU if
+             * an interrupt was posted for it.
+             */
+
+            if ( old.on )
+            {
+                /*
+                 * The vCPU will be removed from the block list
+                 * during its state transferring from RUNSTATE_blocked
+                 * to RUNSTATE_runnable after the following tasklet
+                 * is executed.
+                 */
+                tasklet_schedule(&v->arch.hvm_vmx.pi_vcpu_wakeup_tasklet);
+                return;
+            }
+
+            /*
+             * Change the 'NDST' field to v->arch.hvm_vmx.pi_block_cpu,
+             * so when external interrupts from assigned devices happen,
+             * wakeup notification event will go to
+             * v->arch.hvm_vmx.pi_block_cpu, then in pi_wakeup_interrupt()
+             * we can find the vCPU in the right list to wake up.
+             */
+            if ( x2apic_enabled )
+                new.ndst = cpu_physical_id(v->arch.hvm_vmx.pi_block_cpu);
+            else
+                new.ndst = MASK_INSR(cpu_physical_id(
+                                     v->arch.hvm_vmx.pi_block_cpu),
+                                     PI_xAPIC_NDST_MASK);
+            new.sn = 0;
+            new.nv = pi_wakeup_vector;
+        } while ( cmpxchg(&pi_desc->control, old.control, new.control)
+                  != old.control );
+        break;
+
+    case RUNSTATE_running:
+        ASSERT(pi_desc->sn == 1);
+
+        if ( x2apic_enabled )
+            write_atomic(&pi_desc->ndst, cpu_physical_id(v->processor));
+        else
+            write_atomic(&pi_desc->ndst,
+                         MASK_INSR(cpu_physical_id(v->processor),
+                         PI_xAPIC_NDST_MASK));
+
+        pi_clear_sn(pi_desc);
+
+        break;
+
+    default:
+        ASSERT_UNREACHABLE();
+        break;
+    }
+}
+
 void vmx_hypervisor_cpuid_leaf(uint32_t sub_idx,
                                uint32_t *eax, uint32_t *ebx,
                                uint32_t *ecx, uint32_t *edx)
@@ -1989,7 +2108,10 @@ const struct hvm_function_table * __init start_vmx(void)
         alloc_direct_apic_vector(&posted_intr_vector, 
pi_notification_interrupt);
 
         if ( iommu_intpost )
+        {
             alloc_direct_apic_vector(&pi_wakeup_vector, pi_wakeup_interrupt);
+            vmx_function_table.pi_desc_update = vmx_pi_desc_update;
+        }
     }
     else
     {
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 6b02f98..20727d6 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -142,6 +142,7 @@ static inline void vcpu_runstate_change(
     struct vcpu *v, int new_state, s_time_t new_entry_time)
 {
     s_time_t delta;
+    int old_state;
 
     ASSERT(v->runstate.state != new_state);
     ASSERT(spin_is_locked(per_cpu(schedule_data,v->processor).schedule_lock));
@@ -157,7 +158,10 @@ static inline void vcpu_runstate_change(
         v->runstate.state_entry_time = new_entry_time;
     }
 
+    old_state = v->runstate.state;
     v->runstate.state = new_state;
+
+    arch_pi_desc_update(v, old_state);
 }
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index f1a087e..9603cf0 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -265,6 +265,8 @@ static inline unsigned int domain_max_vcpus(const struct 
domain *d)
     return MAX_VIRT_CPUS;
 }
 
+static inline void arch_pi_desc_update(struct vcpu *v, int old_state) {}
+
 #endif /* __ASM_DOMAIN_H__ */
 
 /*
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index e621c30..e175417 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -510,6 +510,8 @@ bool_t nhvm_vmcx_hap_enabled(struct vcpu *v);
 /* interrupt */
 enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v);
 
+void arch_pi_desc_update(struct vcpu *v, int old_state);
+
 #ifndef NDEBUG
 /* Permit use of the Forced Emulation Prefix in HVM guests */
 extern bool_t opt_hvm_fep;
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h 
b/xen/include/asm-x86/hvm/vmx/vmcs.h
index b6b34d1..ea8fbe5 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -167,6 +167,13 @@ struct arch_vmx_struct {
     struct tasklet       pi_vcpu_wakeup_tasklet;
 
     struct list_head     pi_blocked_vcpu_list;
+
+    /*
+     * Before vCPU is blocked, it is added to the global per-cpu list
+     * of 'pi_block_cpu', then VT-d engine can send wakeup notification
+     * event to 'pi_block_cpu' and wakeup the related vCPU.
+     */
+    int                  pi_block_cpu;
 };
 
 int vmx_create_vmcs(struct vcpu *v);
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h 
b/xen/include/asm-x86/hvm/vmx/vmx.h
index 663af33..ea02e21 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -108,6 +108,7 @@ void vmx_update_cpu_exec_control(struct vcpu *v);
 void vmx_update_secondary_exec_control(struct vcpu *v);
 
 #define POSTED_INTR_ON  0
+#define POSTED_INTR_SN  1
 static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
 {
     return test_and_set_bit(vector, pi_desc->pir);
@@ -133,6 +134,16 @@ static inline unsigned long pi_get_pir(struct pi_desc 
*pi_desc, int group)
     return xchg(&pi_desc->pir[group], 0);
 }
 
+static inline void pi_set_sn(struct pi_desc *pi_desc)
+{
+    set_bit(POSTED_INTR_SN, &pi_desc->control);
+}
+
+static inline void pi_clear_sn(struct pi_desc *pi_desc)
+{
+    clear_bit(POSTED_INTR_SN, &pi_desc->control);
+}
+
 /*
  * Exit Reasons
  */
-- 
2.1.0


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

Reply via email to