These are used to capture the XIVE EAS table of the KVM device, i.e. the
configuration of the source targets.

Signed-off-by: Cédric Le Goater <c...@kaod.org>
---
 arch/powerpc/include/uapi/asm/kvm.h   | 11 ++++
 arch/powerpc/kvm/book3s_xive_native.c | 87 +++++++++++++++++++++++++++
 2 files changed, 98 insertions(+)

diff --git a/arch/powerpc/include/uapi/asm/kvm.h 
b/arch/powerpc/include/uapi/asm/kvm.h
index 1a8740629acf..faf024f39858 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -683,9 +683,20 @@ struct kvm_ppc_cpu_char {
 #define   KVM_DEV_XIVE_SAVE_EQ_PAGES   4
 #define KVM_DEV_XIVE_GRP_SOURCES       2       /* 64-bit source attributes */
 #define KVM_DEV_XIVE_GRP_SYNC          3       /* 64-bit source attributes */
+#define KVM_DEV_XIVE_GRP_EAS           4       /* 64-bit eas attributes */
 
 /* Layout of 64-bit XIVE source attribute values */
 #define KVM_XIVE_LEVEL_SENSITIVE       (1ULL << 0)
 #define KVM_XIVE_LEVEL_ASSERTED                (1ULL << 1)
 
+/* Layout of 64-bit eas attribute values */
+#define KVM_XIVE_EAS_PRIORITY_SHIFT    0
+#define KVM_XIVE_EAS_PRIORITY_MASK     0x7
+#define KVM_XIVE_EAS_SERVER_SHIFT      3
+#define KVM_XIVE_EAS_SERVER_MASK       0xfffffff8ULL
+#define KVM_XIVE_EAS_MASK_SHIFT                32
+#define KVM_XIVE_EAS_MASK_MASK         0x100000000ULL
+#define KVM_XIVE_EAS_EISN_SHIFT                33
+#define KVM_XIVE_EAS_EISN_MASK         0xfffffffe00000000ULL
+
 #endif /* __LINUX_KVM_POWERPC_H */
diff --git a/arch/powerpc/kvm/book3s_xive_native.c 
b/arch/powerpc/kvm/book3s_xive_native.c
index f2de1bcf3b35..0468b605baa7 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -525,6 +525,88 @@ static int kvmppc_xive_native_sync(struct kvmppc_xive 
*xive, long irq, u64 addr)
        return 0;
 }
 
+static int kvmppc_xive_native_set_eas(struct kvmppc_xive *xive, long irq,
+                                     u64 addr)
+{
+       struct kvmppc_xive_src_block *sb;
+       struct kvmppc_xive_irq_state *state;
+       u64 __user *ubufp = (u64 __user *) addr;
+       u16 src;
+       u64 kvm_eas;
+       u32 server;
+       u8 priority;
+       u32 eisn;
+
+       sb = kvmppc_xive_find_source(xive, irq, &src);
+       if (!sb)
+               return -ENOENT;
+
+       state = &sb->irq_state[src];
+
+       if (!state->valid)
+               return -EINVAL;
+
+       if (get_user(kvm_eas, ubufp))
+               return -EFAULT;
+
+       pr_devel("%s irq=0x%lx eas=%016llx\n", __func__, irq, kvm_eas);
+
+       priority = (kvm_eas & KVM_XIVE_EAS_PRIORITY_MASK) >>
+               KVM_XIVE_EAS_PRIORITY_SHIFT;
+       server = (kvm_eas & KVM_XIVE_EAS_SERVER_MASK) >>
+               KVM_XIVE_EAS_SERVER_SHIFT;
+       eisn = (kvm_eas & KVM_XIVE_EAS_EISN_MASK) >> KVM_XIVE_EAS_EISN_SHIFT;
+
+       if (priority != xive_prio_from_guest(priority)) {
+               pr_err("invalid priority for queue %d for VCPU %d\n",
+                      priority, server);
+               return -EINVAL;
+       }
+
+       return kvmppc_xive_native_set_source_config(xive, sb, state, server,
+                                                   priority, eisn);
+}
+
+static int kvmppc_xive_native_get_eas(struct kvmppc_xive *xive, long irq,
+                                     u64 addr)
+{
+       struct kvmppc_xive_src_block *sb;
+       struct kvmppc_xive_irq_state *state;
+       u64 __user *ubufp = (u64 __user *) addr;
+       u16 src;
+       u64 kvm_eas;
+
+       sb = kvmppc_xive_find_source(xive, irq, &src);
+       if (!sb)
+               return -ENOENT;
+
+       state = &sb->irq_state[src];
+
+       if (!state->valid)
+               return -EINVAL;
+
+       arch_spin_lock(&sb->lock);
+
+       if (state->act_priority == MASKED)
+               kvm_eas = KVM_XIVE_EAS_MASK_MASK;
+       else {
+               kvm_eas = (state->act_priority << KVM_XIVE_EAS_PRIORITY_SHIFT) &
+                       KVM_XIVE_EAS_PRIORITY_MASK;
+               kvm_eas |= (state->act_server << KVM_XIVE_EAS_SERVER_SHIFT) &
+                       KVM_XIVE_EAS_SERVER_MASK;
+               kvm_eas |= ((u64) state->eisn << KVM_XIVE_EAS_EISN_SHIFT) &
+                       KVM_XIVE_EAS_EISN_MASK;
+       }
+       arch_spin_unlock(&sb->lock);
+
+       pr_devel("%s irq=0x%lx eas=%016llx\n", __func__, irq, kvm_eas);
+
+       if (put_user(kvm_eas, ubufp))
+               return -EFAULT;
+
+       return 0;
+}
+
 static int kvmppc_xive_native_set_attr(struct kvm_device *dev,
                                       struct kvm_device_attr *attr)
 {
@@ -544,6 +626,8 @@ static int kvmppc_xive_native_set_attr(struct kvm_device 
*dev,
                                                     attr->addr);
        case KVM_DEV_XIVE_GRP_SYNC:
                return kvmppc_xive_native_sync(xive, attr->attr, attr->addr);
+       case KVM_DEV_XIVE_GRP_EAS:
+               return kvmppc_xive_native_set_eas(xive, attr->attr, attr->addr);
        }
        return -ENXIO;
 }
@@ -564,6 +648,8 @@ static int kvmppc_xive_native_get_attr(struct kvm_device 
*dev,
                        return kvmppc_xive_native_get_vc_base(xive, attr->addr);
                }
                break;
+       case KVM_DEV_XIVE_GRP_EAS:
+               return kvmppc_xive_native_get_eas(xive, attr->attr, attr->addr);
        }
        return -ENXIO;
 }
@@ -583,6 +669,7 @@ static int kvmppc_xive_native_has_attr(struct kvm_device 
*dev,
                break;
        case KVM_DEV_XIVE_GRP_SOURCES:
        case KVM_DEV_XIVE_GRP_SYNC:
+       case KVM_DEV_XIVE_GRP_EAS:
                if (attr->attr >= KVMPPC_XIVE_FIRST_IRQ &&
                    attr->attr < KVMPPC_XIVE_NR_IRQS)
                        return 0;
-- 
2.20.1

Reply via email to