Due to the overlap between SVE and SME vector length configuration
created by streaming mode SVE, we will finalize both at once.  Rename
the existing finalization to use _VEC (vector) naming in order to
avoid confusion.

Since this includes the userspace API, we create an alias
KVM_ARM_VCPU_VEC for the existing KVM_ARM_VCPU_SVE capability.
Existing code which does not enable SME will be unaffected, and
SME-only code will not need to use SVE constants.

No functional change.

Signed-off-by: Mark Brown <broo...@kernel.org>
---
 arch/arm64/include/asm/kvm_host.h |  6 ++++--
 arch/arm64/include/uapi/asm/kvm.h |  6 ++++++
 arch/arm64/kvm/guest.c            | 10 +++++-----
 arch/arm64/kvm/hyp/nvhe/pkvm.c    |  2 +-
 arch/arm64/kvm/reset.c            | 20 ++++++++++----------
 5 files changed, 26 insertions(+), 18 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index cef03309fbe6b7f44ab30357ceb037e7e342fb0c..f12b13c3886a2b90d0f1e95f9d59f374d3c87398 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -874,7 +874,7 @@ struct kvm_vcpu_arch {
 /* KVM_ARM_VCPU_INIT completed */
 #define VCPU_INITIALIZED       __vcpu_single_flag(cflags, BIT(0))
 /* SVE config completed */
-#define VCPU_SVE_FINALIZED     __vcpu_single_flag(cflags, BIT(1))
+#define VCPU_VEC_FINALIZED     __vcpu_single_flag(cflags, BIT(1))
 
 /* Exception pending */
 #define PENDING_EXCEPTION      __vcpu_single_flag(iflags, BIT(0))
@@ -941,6 +941,8 @@ struct kvm_vcpu_arch {
 #define vcpu_has_sve(vcpu)     kvm_has_sve((vcpu)->kvm)
 #endif
 
+#define vcpu_has_vec(vcpu) vcpu_has_sve(vcpu)
+
 #ifdef CONFIG_ARM64_PTR_AUTH
 #define vcpu_has_ptrauth(vcpu)                                         \
        ((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||                \
@@ -1423,7 +1425,7 @@ struct kvm *kvm_arch_alloc_vm(void);
 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 
-#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)
+#define kvm_arm_vcpu_vec_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_VEC_FINALIZED)
 
 #define kvm_has_mte(kvm)                                       \
        (system_supports_mte() &&                               \
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 568bf858f3198e2a6f651eb8ae793b54fd49e67a..ea5aaa4b1cfe82862883678bbe47373d53c005f6 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -106,6 +106,12 @@ struct kvm_regs {
 #define KVM_ARM_VCPU_PTRAUTH_GENERIC   6 /* VCPU uses generic authentication */
 #define KVM_ARM_VCPU_HAS_EL2           7 /* Support nested virtualization */
 
+/*
+ * An alias for _SVE since we finalize VL configuration for both SVE and SME
+ * simultaneously.
+ */
+#define KVM_ARM_VCPU_VEC               KVM_ARM_VCPU_SVE
+
 struct kvm_vcpu_init {
        __u32 target;
        __u32 features[7];
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 2196979a24a325311d6111404e4d089287c41bfe..73e714133bb647c1d50fc67d5c8bf0569520a537 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -342,7 +342,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        if (!vcpu_has_sve(vcpu))
                return -ENOENT;
 
-       if (kvm_arm_vcpu_sve_finalized(vcpu))
+       if (kvm_arm_vcpu_vec_finalized(vcpu))
                return -EPERM; /* too late! */
 
        if (WARN_ON(vcpu->arch.sve_state))
@@ -497,7 +497,7 @@ static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        if (ret)
                return ret;
 
-       if (!kvm_arm_vcpu_sve_finalized(vcpu))
+       if (!kvm_arm_vcpu_vec_finalized(vcpu))
                return -EPERM;
 
        if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
@@ -523,7 +523,7 @@ static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        if (ret)
                return ret;
 
-       if (!kvm_arm_vcpu_sve_finalized(vcpu))
+       if (!kvm_arm_vcpu_vec_finalized(vcpu))
                return -EPERM;
 
        if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
@@ -657,7 +657,7 @@ static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
                return 0;
 
        /* Policed by KVM_GET_REG_LIST: */
-       WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
+       WARN_ON(!kvm_arm_vcpu_vec_finalized(vcpu));
 
        return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
                + 1; /* KVM_REG_ARM64_SVE_VLS */
@@ -675,7 +675,7 @@ static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
                return 0;
 
        /* Policed by KVM_GET_REG_LIST: */
-       WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
+       WARN_ON(!kvm_arm_vcpu_vec_finalized(vcpu));
 
        /*
         * Enumerate this first, so that userspace can save/restore in
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 3927fe52a3dde8f59ddb996d674dd252630613a3..5896e581c1fac943cf372f1df24311ef43529d62 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -381,7 +381,7 @@ static void pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *
        struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
 
        if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
-               vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
+               vcpu_clear_flag(vcpu, VCPU_VEC_FINALIZED);
 }
 
 static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 803e11b0dc8f5eb74b07b0ad745b0c4f666713d5..ce726b1d4e8e90cfd4459a6cb9c67b8805424e22 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -92,7 +92,7 @@ static void kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
  * Finalize vcpu's maximum SVE vector length, allocating
  * vcpu->arch.sve_state as necessary.
  */
-static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
+static int kvm_vcpu_finalize_vec(struct kvm_vcpu *vcpu)
 {
        void *buf;
        unsigned int vl;
@@ -122,21 +122,21 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
        }
        
        vcpu->arch.sve_state = buf;
-       vcpu_set_flag(vcpu, VCPU_SVE_FINALIZED);
+       vcpu_set_flag(vcpu, VCPU_VEC_FINALIZED);
        return 0;
 }
 
 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
 {
        switch (feature) {
-       case KVM_ARM_VCPU_SVE:
-               if (!vcpu_has_sve(vcpu))
+       case KVM_ARM_VCPU_VEC:
+               if (!vcpu_has_vec(vcpu))
                        return -EINVAL;
 
-               if (kvm_arm_vcpu_sve_finalized(vcpu))
+               if (kvm_arm_vcpu_vec_finalized(vcpu))
                        return -EPERM;
 
-               return kvm_vcpu_finalize_sve(vcpu);
+               return kvm_vcpu_finalize_vec(vcpu);
        }
 
        return -EINVAL;
@@ -144,7 +144,7 @@ int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
 
 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
 {
-       if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
+       if (vcpu_has_vec(vcpu) && !kvm_arm_vcpu_vec_finalized(vcpu))
                return false;
 
        return true;
@@ -161,7 +161,7 @@ void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
        kfree(vcpu->arch.ccsidr);
 }
 
-static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
+static void kvm_vcpu_reset_vec(struct kvm_vcpu *vcpu)
 {
        if (vcpu_has_sve(vcpu))
                memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
@@ -204,11 +204,11 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
        if (loaded)
                kvm_arch_vcpu_put(vcpu);
 
-       if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
+       if (!kvm_arm_vcpu_vec_finalized(vcpu)) {
                if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
                        kvm_vcpu_enable_sve(vcpu);
        } else {
-               kvm_vcpu_reset_sve(vcpu);
+               kvm_vcpu_reset_vec(vcpu);
        }
 
        if (vcpu_el1_is_32bit(vcpu))

-- 
2.39.5

