From: Wanpeng Li <wanpen...@tencent.com>

The NMI delivery mode of the ICR delivers an NMI to the target processor; in
that mode the vector information is ignored. Build an ICR-style value on the
guest side (delivery mode plus vector, with no vector for NMIs) and pass it in
the last KVM_HC_SEND_IPI argument, then decode it on the host side into the
delivery mode and vector of the injected interrupt, so that NMI IPIs can also
be sent through the PV IPI hypercall.
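
For illustration, a minimal sketch of the ICR-style encoding the guest side
uses for the last KVM_HC_SEND_IPI argument; pv_ipi_icr() is a hypothetical
helper, not part of this patch, and only relies on the existing APIC_DM_FIXED,
APIC_DM_NMI (<asm/apicdef.h>) and NMI_VECTOR (<asm/irq_vectors.h>) definitions:

/*
 * Illustrative only: not introduced by this patch.
 */
static inline unsigned long pv_ipi_icr(int vector)
{
	/* NMI delivery mode: the ICR vector field is ignored. */
	if (vector == NMI_VECTOR)
		return APIC_DM_NMI;

	/* Fixed delivery mode: the vector travels in the low byte. */
	return APIC_DM_FIXED | vector;
}

The host recovers the two pieces with icr & APIC_MODE_MASK and
icr & APIC_VECTOR_MASK, as in the x86.c hunk below.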

Cc: Paolo Bonzini <pbonz...@redhat.com>
Cc: Radim Krčmář <rkrc...@redhat.com>
Cc: Vitaly Kuznetsov <vkuzn...@redhat.com>
Signed-off-by: Wanpeng Li <wanpen...@tencent.com>
---
 arch/x86/kernel/kvm.c | 15 ++++++++++++---
 arch/x86/kvm/x86.c    | 16 +++++++++++-----
 2 files changed, 23 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 57eb4a2..3456531 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -458,7 +458,7 @@ static void __init sev_map_percpu_data(void)
 static int __send_ipi_mask(const struct cpumask *mask, int vector)
 {
        unsigned long flags;
-       int cpu, apic_id, min = 0, max = 0, ret = 0;
+       int cpu, apic_id, min = 0, max = 0, ret = 0, icr = 0;
 #ifdef CONFIG_X86_64
        __uint128_t ipi_bitmap = 0;
        int cluster_size = 128;
@@ -472,6 +472,15 @@ static int __send_ipi_mask(const struct cpumask *mask, int vector)
 
        local_irq_save(flags);
 
+       switch (vector) {
+       default:
+               icr = APIC_DM_FIXED | vector;
+               break;
+       case NMI_VECTOR:
+               icr = APIC_DM_NMI;
+               break;
+       }
+
        for_each_cpu(cpu, mask) {
                apic_id = per_cpu(x86_cpu_to_apicid, cpu);
                if (!ipi_bitmap) {
@@ -483,7 +492,7 @@ static int __send_ipi_mask(const struct cpumask *mask, int vector)
                        max = apic_id < max ? max : apic_id;
                } else {
                        ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
-                               (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, vector);
+                               (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
                        min = max = apic_id;
                        ipi_bitmap = 0;
                }
@@ -492,7 +501,7 @@ static int __send_ipi_mask(const struct cpumask *mask, int vector)
 
        if (ipi_bitmap) {
                ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
-                       (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, vector);
+                       (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
        }
 
        local_irq_restore(flags);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a43a29f..c118040 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6695,17 +6695,23 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
  * Return 0 if successfully added and 1 if discarded.
  */
 static int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
-               unsigned long ipi_bitmap_high, int min, int vector, int op_64_bit)
+               unsigned long ipi_bitmap_high, int min, unsigned long icr, int op_64_bit)
 {
        int i;
        struct kvm_apic_map *map;
        struct kvm_vcpu *vcpu;
-       struct kvm_lapic_irq irq = {
-               .delivery_mode = APIC_DM_FIXED,
-               .vector = vector,
-       };
+       struct kvm_lapic_irq irq = {0};
        int cluster_size = op_64_bit ? 64 : 32;
 
+       switch (icr & APIC_VECTOR_MASK) {
+       default:
+               irq.vector = icr & APIC_VECTOR_MASK;
+               break;
+       case NMI_VECTOR:
+               break;
+       }
+       irq.delivery_mode = icr & APIC_MODE_MASK;
+
        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);
 
-- 
2.7.4
