Add variants of the SEV and SEV-ES launch path helpers that return the
status of the ioctl call instead of asserting on success. This enables
both positive and negative testing of the launch path.

No functional impact intended.
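
As an illustration of the negative-testing side this enables, a test
could use the new non-asserting variant roughly as follows. This is
only a sketch: the test_launch_start() helper name is made up here, and
whether a given policy is rejected (and with which errno) depends on
the host, so the expectations below are assumptions rather than
behaviour this patch defines.

#include <errno.h>

#include "test_util.h"
#include "kvm_util.h"
#include "sev.h"

/*
 * Illustrative only: drive KVM_SEV_LAUNCH_START through the
 * non-asserting variant and check the result explicitly instead of
 * asserting success unconditionally.
 */
static void test_launch_start(struct kvm_vm *vm, uint32_t policy,
			      bool expect_success)
{
	int ret = __sev_vm_launch_start(vm, policy);

	if (expect_success)
		TEST_ASSERT(!ret, "KVM_SEV_LAUNCH_START failed, ret: %d, errno: %d",
			    ret, errno);
	else
		/* Which policies are rejected is host-dependent (assumption). */
		TEST_ASSERT(ret, "KVM_SEV_LAUNCH_START should have failed, policy: 0x%x",
			    policy);
}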

Signed-off-by: Pratik R. Sampat <pratikrajesh.sam...@amd.com>
Tested-by: Peter Gonda <pgo...@google.com>
Tested-by: Srikanth Aithal <srait...@amd.com>
---
 .../selftests/kvm/include/x86_64/sev.h        | 22 +++++-
 tools/testing/selftests/kvm/lib/x86_64/sev.c  | 78 +++++++++++++++----
 2 files changed, 80 insertions(+), 20 deletions(-)

diff --git a/tools/testing/selftests/kvm/include/x86_64/sev.h b/tools/testing/selftests/kvm/include/x86_64/sev.h
index 82c11c81a956..3998152cc081 100644
--- a/tools/testing/selftests/kvm/include/x86_64/sev.h
+++ b/tools/testing/selftests/kvm/include/x86_64/sev.h
@@ -27,6 +27,12 @@ enum sev_guest_state {
 
 #define GHCB_MSR_TERM_REQ      0x100
 
+/* Variants of the SEV launch path that do not assert the ioctl status */
+int __sev_vm_launch_start(struct kvm_vm *vm, uint32_t policy);
+int __sev_vm_launch_update(struct kvm_vm *vm, uint32_t policy);
+int __sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement);
+int __sev_vm_launch_finish(struct kvm_vm *vm);
+
 void sev_vm_launch(struct kvm_vm *vm, uint32_t policy);
 void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement);
 void sev_vm_launch_finish(struct kvm_vm *vm);
@@ -82,15 +88,23 @@ static inline void sev_register_encrypted_memory(struct kvm_vm *vm,
        vm_ioctl(vm, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
 }
 
-static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
-                                         uint64_t size)
+static inline int __sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
+                                          uint64_t hva, uint64_t size)
 {
        struct kvm_sev_launch_update_data update_data = {
-               .uaddr = (unsigned long)addr_gpa2hva(vm, gpa),
+               .uaddr = hva,
                .len = size,
        };
 
-       vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_DATA, &update_data);
+       return __vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_DATA, &update_data);
+}
+
+static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
+                                         uint64_t hva, uint64_t size)
+{
+       int ret = __sev_launch_update_data(vm, gpa, hva, size);
+
+       TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_SEV_LAUNCH_UPDATE_DATA, ret, vm);
 }
 
 #endif /* SELFTEST_KVM_SEV_H */
diff --git a/tools/testing/selftests/kvm/lib/x86_64/sev.c b/tools/testing/selftests/kvm/lib/x86_64/sev.c
index e9535ee20b7f..125a72246e09 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/sev.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/sev.c
@@ -14,15 +14,16 @@
  * and find the first range, but that's correct because the condition
  * expression would cause us to quit the loop.
  */
-static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region)
+static int encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region)
 {
        const struct sparsebit *protected_phy_pages = region->protected_phy_pages;
        const vm_paddr_t gpa_base = region->region.guest_phys_addr;
        const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift;
        sparsebit_idx_t i, j;
+       int ret;
 
        if (!sparsebit_any_set(protected_phy_pages))
-               return;
+               return 0;
 
        sev_register_encrypted_memory(vm, region);
 
@@ -30,8 +31,15 @@ static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region)
                const uint64_t size = (j - i + 1) * vm->page_size;
                const uint64_t offset = (i - lowest_page_in_region) * vm->page_size;
 
-               sev_launch_update_data(vm, gpa_base + offset, size);
+               ret = __sev_launch_update_data(vm, gpa_base + offset,
+                                              (uint64_t)addr_gpa2hva(vm, gpa_base + offset),
+                                              size);
+               if (ret)
+                       return ret;
+
        }
+
+       return 0;
 }
 
 void sev_vm_init(struct kvm_vm *vm)
@@ -60,38 +68,74 @@ void sev_es_vm_init(struct kvm_vm *vm)
        }
 }
 
-void sev_vm_launch(struct kvm_vm *vm, uint32_t policy)
+int __sev_vm_launch_start(struct kvm_vm *vm, uint32_t policy)
 {
        struct kvm_sev_launch_start launch_start = {
                .policy = policy,
        };
+
+       return __vm_sev_ioctl(vm, KVM_SEV_LAUNCH_START, &launch_start);
+}
+
+int __sev_vm_launch_update(struct kvm_vm *vm, uint32_t policy)
+{
        struct userspace_mem_region *region;
-       struct kvm_sev_guest_status status;
        int ctr;
 
-       vm_sev_ioctl(vm, KVM_SEV_LAUNCH_START, &launch_start);
-       vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
+       hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
+               int ret = encrypt_region(vm, region);
 
-       TEST_ASSERT_EQ(status.policy, policy);
-       TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_LAUNCH_UPDATE);
-
-       hash_for_each(vm->regions.slot_hash, ctr, region, slot_node)
-               encrypt_region(vm, region);
+               if (ret)
+                       return ret;
+       }
 
        if (policy & SEV_POLICY_ES)
                vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
 
        vm->arch.is_pt_protected = true;
+
+       return 0;
 }
 
-void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement)
+int __sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement)
 {
        struct kvm_sev_launch_measure launch_measure;
-       struct kvm_sev_guest_status guest_status;
 
        launch_measure.len = 256;
        launch_measure.uaddr = (__u64)measurement;
-       vm_sev_ioctl(vm, KVM_SEV_LAUNCH_MEASURE, &launch_measure);
+
+       return __vm_sev_ioctl(vm, KVM_SEV_LAUNCH_MEASURE, &launch_measure);
+}
+
+int __sev_vm_launch_finish(struct kvm_vm *vm)
+{
+       return __vm_sev_ioctl(vm, KVM_SEV_LAUNCH_FINISH, NULL);
+}
+
+void sev_vm_launch(struct kvm_vm *vm, uint32_t policy)
+{
+       struct kvm_sev_guest_status status;
+       int ret;
+
+       ret = __sev_vm_launch_start(vm, policy);
+       TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_SEV_LAUNCH_START, ret, vm);
+
+       vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
+
+       TEST_ASSERT_EQ(status.policy, policy);
+       TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_LAUNCH_UPDATE);
+
+       ret = __sev_vm_launch_update(vm, policy);
+       TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_SEV_LAUNCH_UPDATE_DATA, ret, vm);
+}
+
+void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement)
+{
+       struct kvm_sev_guest_status guest_status;
+       int ret;
+
+       ret = __sev_vm_launch_measure(vm, measurement);
+       TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_SEV_LAUNCH_MEASURE, ret, vm);
 
        vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &guest_status);
        TEST_ASSERT_EQ(guest_status.state, SEV_GUEST_STATE_LAUNCH_SECRET);
@@ -100,13 +144,15 @@ void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement)
 void sev_vm_launch_finish(struct kvm_vm *vm)
 {
        struct kvm_sev_guest_status status;
+       int ret;
 
        vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
        TEST_ASSERT(status.state == SEV_GUEST_STATE_LAUNCH_UPDATE ||
                    status.state == SEV_GUEST_STATE_LAUNCH_SECRET,
                    "Unexpected guest state: %d", status.state);
 
-       vm_sev_ioctl(vm, KVM_SEV_LAUNCH_FINISH, NULL);
+       ret = __sev_vm_launch_finish(vm);
+       TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_SEV_LAUNCH_FINISH, ret, vm);
 
        vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
        TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_RUNNING);
-- 
2.34.1

