On 2/7/25 17:34, Kim Phillips wrote:
> AMD EPYC 5th generation processors have introduced a feature that allows
> the hypervisor to control the SEV_FEATURES that are set for, or by, a
> guest [1].  ALLOWED_SEV_FEATURES can be used by the hypervisor to enforce
> that SEV-ES and SEV-SNP guests cannot enable features that the
> hypervisor does not want to be enabled.
> 
> When ALLOWED_SEV_FEATURES is enabled, a VMRUN will fail if any
> non-reserved bits are 1 in SEV_FEATURES but are 0 in
> ALLOWED_SEV_FEATURES.
> 
> Some SEV_FEATURES - currently PmcVirtualization and SecureAvic
> (see Appendix B, Table B-4) - require an opt-in via ALLOWED_SEV_FEATURES,
> i.e. are off-by-default, whereas all other features are effectively
> on-by-default, but still honor ALLOWED_SEV_FEATURES.
> 
> [1] Section 15.36.20 "Allowed SEV Features", AMD64 Architecture
>     Programmer's Manual, Pub. 24593 Rev. 3.42 - March 2024:
>     https://bugzilla.kernel.org/attachment.cgi?id=306250
> 
> Co-developed-by: Kishon Vijay Abraham I <kvija...@amd.com>
> Signed-off-by: Kishon Vijay Abraham I <kvija...@amd.com>
> Signed-off-by: Kim Phillips <kim.phill...@amd.com>
> ---
>  arch/x86/include/asm/svm.h |  5 ++++-
>  arch/x86/kvm/svm/sev.c     | 17 +++++++++++++++++
>  2 files changed, 21 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
> index e2fac21471f5..6d94a727cc1a 100644
> --- a/arch/x86/include/asm/svm.h
> +++ b/arch/x86/include/asm/svm.h
> @@ -158,7 +158,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
>       u64 avic_physical_id;   /* Offset 0xf8 */
>       u8 reserved_7[8];
>       u64 vmsa_pa;            /* Used for an SEV-ES guest */
> -     u8 reserved_8[720];
> +     u8 reserved_8[40];
> +     u64 allowed_sev_features;       /* Offset 0x138 */
> +     u8 reserved_9[672];
>       /*
>        * Offset 0x3e0, 32 bytes reserved
>        * for use by hypervisor/software.
> @@ -289,6 +291,7 @@ static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_
>  #define SVM_SEV_FEAT_RESTRICTED_INJECTION            BIT(3)
>  #define SVM_SEV_FEAT_ALTERNATE_INJECTION             BIT(4)
>  #define SVM_SEV_FEAT_DEBUG_SWAP                              BIT(5)
> +#define SVM_SEV_FEAT_ALLOWED_SEV_FEATURES            BIT_ULL(63)

Hmmm... I believe it is safe to define this bit value, as the Allowed
SEV features VMCB field shows bits 61:0 being used for the allowed
features mask and we know that the SEV_FEATURES field is used in the SEV
Features MSR left-shifted 2 bits, so we only expect bits 61:0 to be used
and bits 62 and 63 will always be reserved. But, given that, I think we
need two functions:

- get_allowed_sev_features()
  keeping it as you have it below, where it returns the
  sev->vmsa_features bitmap if SVM_SEV_FEAT_ALLOWED_SEV_FEATURES is set
  or 0 if SVM_SEV_FEAT_ALLOWED_SEV_FEATURES is not set.

- get_vmsa_sev_features()
  which removes the SVM_SEV_FEAT_ALLOWED_SEV_FEATURES bit, since it is
  not defined in the VMSA SEV_FEATURES definition.

>  
>  #define SVM_SEV_FEAT_INT_INJ_MODES           \
>       (SVM_SEV_FEAT_RESTRICTED_INJECTION |    \
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index a2a794c32050..a9e16792cac0 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -894,9 +894,19 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
>       return 0;
>  }
>  
> +static u64 allowed_sev_features(struct kvm_sev_info *sev)
> +{
> +     if (cpu_feature_enabled(X86_FEATURE_ALLOWED_SEV_FEATURES) &&

Not sure if the cpu_feature_enabled() check is necessary, as init should
have failed if SVM_SEV_FEAT_ALLOWED_SEV_FEATURES wasn't set in
sev_supported_vmsa_features.

Thanks,
Tom

> +         (sev->vmsa_features & SVM_SEV_FEAT_ALLOWED_SEV_FEATURES))
> +             return sev->vmsa_features;
> +
> +     return 0;
> +}
> +
>  static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
>                                   int *error)
>  {
> +     struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
>       struct sev_data_launch_update_vmsa vmsa;
>       struct vcpu_svm *svm = to_svm(vcpu);
>       int ret;
> @@ -906,6 +916,8 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
>               return -EINVAL;
>       }
>  
> +     svm->vmcb->control.allowed_sev_features = allowed_sev_features(sev);
> +
>       /* Perform some pre-encryption checks against the VMSA */
>       ret = sev_es_sync_vmsa(svm);
>       if (ret)
> @@ -2447,6 +2459,8 @@ static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
>               struct vcpu_svm *svm = to_svm(vcpu);
>               u64 pfn = __pa(svm->sev_es.vmsa) >> PAGE_SHIFT;
>  
> +             svm->vmcb->control.allowed_sev_features = allowed_sev_features(sev);
> +
>               ret = sev_es_sync_vmsa(svm);
>               if (ret)
>                       return ret;
> @@ -3069,6 +3083,9 @@ void __init sev_hardware_setup(void)
>       sev_supported_vmsa_features = 0;
>       if (sev_es_debug_swap_enabled)
>               sev_supported_vmsa_features |= SVM_SEV_FEAT_DEBUG_SWAP;
> +
> +     if (sev_es_enabled && cpu_feature_enabled(X86_FEATURE_ALLOWED_SEV_FEATURES))
> +             sev_supported_vmsa_features |= SVM_SEV_FEAT_ALLOWED_SEV_FEATURES;
>  }
>  
>  void sev_hardware_unsetup(void)

Reply via email to