H_SVM_INIT_START: Initiate securing a VM
H_SVM_INIT_DONE: Conclude securing a VM

As part of H_SVM_INIT_START, register all existing memslots with the UV.
The H_SVM_INIT_DONE call, issued by the UV, informs the HV that the
guest's transition to secure mode is complete.
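
For reference, here is a rough, condensed sketch of the ordering these
hcalls establish (illustrative only: the function name below is made up
for this sketch, and the srcu locking done by the real handlers is
omitted):

	/*
	 * Illustrative sketch condensed from kvmppc_h_svm_init_start()
	 * and kvmppc_h_svm_init_done() added by this patch: register
	 * every memslot with the UV, then track the transition state in
	 * kvm->arch.secure_guest.
	 */
	static unsigned long svm_init_sequence(struct kvm *kvm)
	{
		struct kvm_memory_slot *memslot;
		struct kvm_memslots *slots = kvm_memslots(kvm);

		/* H_SVM_INIT_START: describe each memslot of this guest to the UV */
		kvm_for_each_memslot(memslot, slots)
			if (uv_register_mem_slot(kvm->arch.lpid,
						 memslot->base_gfn << PAGE_SHIFT,
						 memslot->npages * PAGE_SIZE,
						 0, memslot->id) < 0)
				return H_PARAMETER;
		kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_START;

		/* ... guest pages are then moved in via H_SVM_PAGE_IN ... */

		/*
		 * H_SVM_INIT_DONE: the real handler returns H_UNSUPPORTED
		 * unless KVMPPC_SECURE_INIT_START is already set.
		 */
		kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
		return H_SUCCESS;
	}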

Signed-off-by: Bharata B Rao <bhar...@linux.ibm.com>
---
 arch/powerpc/include/asm/hvcall.h         |  2 ++
 arch/powerpc/include/asm/kvm_book3s_hmm.h | 12 ++++++++
 arch/powerpc/include/asm/kvm_host.h       |  4 +++
 arch/powerpc/include/asm/ultravisor-api.h |  1 +
 arch/powerpc/include/asm/ultravisor.h     |  9 ++++++
 arch/powerpc/kvm/book3s_hv.c              |  7 +++++
 arch/powerpc/kvm/book3s_hv_hmm.c          | 34 +++++++++++++++++++++++
 7 files changed, 69 insertions(+)

diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 05b8536f6653..fa7695928e30 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -343,6 +343,8 @@
 /* Platform-specific hcalls used by the Ultravisor */
 #define H_SVM_PAGE_IN          0xEF00
 #define H_SVM_PAGE_OUT         0xEF04
+#define H_SVM_INIT_START       0xEF08
+#define H_SVM_INIT_DONE        0xEF0C
 
 /* Values for 2nd argument to H_SET_MODE */
 #define H_SET_MODE_RESOURCE_SET_CIABR          1
diff --git a/arch/powerpc/include/asm/kvm_book3s_hmm.h b/arch/powerpc/include/asm/kvm_book3s_hmm.h
index 21f3de5f2acb..3e13dab7f690 100644
--- a/arch/powerpc/include/asm/kvm_book3s_hmm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_hmm.h
@@ -11,6 +11,8 @@ extern unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
                                          unsigned long gra,
                                          unsigned long flags,
                                          unsigned long page_shift);
+extern unsigned long kvmppc_h_svm_init_start(struct kvm *kvm);
+extern unsigned long kvmppc_h_svm_init_done(struct kvm *kvm);
 #else
 static inline unsigned long
 kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gra,
@@ -25,5 +27,15 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gra,
 {
        return H_UNSUPPORTED;
 }
+
+static inline unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
+{
+       return H_UNSUPPORTED;
+}
+
+static inline unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
+{
+       return H_UNSUPPORTED;
+}
 #endif /* CONFIG_PPC_UV */
 #endif /* __POWERPC_KVM_PPC_HMM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index c0c9c3455ac4..845fd2a73506 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -272,6 +272,10 @@ struct kvm_hpt_info {
 
 struct kvm_resize_hpt;
 
+/* Flag values for kvm_arch.secure_guest */
+#define KVMPPC_SECURE_INIT_START       0x1 /* H_SVM_INIT_START has been called */
+#define KVMPPC_SECURE_INIT_DONE        0x2 /* H_SVM_INIT_DONE completed */
+
 struct kvm_arch {
        unsigned int lpid;
        unsigned int smt_mode;          /* # vcpus per virtual core */
diff --git a/arch/powerpc/include/asm/ultravisor-api.h b/arch/powerpc/include/asm/ultravisor-api.h
index 51c4e0b5d197..05b17f4351f4 100644
--- a/arch/powerpc/include/asm/ultravisor-api.h
+++ b/arch/powerpc/include/asm/ultravisor-api.h
@@ -20,6 +20,7 @@
 /* opcodes */
 #define UV_WRITE_PATE                  0xF104
 #define UV_RETURN                      0xF11C
+#define UV_REGISTER_MEM_SLOT           0xF120
 #define UV_PAGE_IN                     0xF128
 #define UV_PAGE_OUT                    0xF12C
 
diff --git a/arch/powerpc/include/asm/ultravisor.h b/arch/powerpc/include/asm/ultravisor.h
index 1e4c51799b43..9befa6fea8db 100644
--- a/arch/powerpc/include/asm/ultravisor.h
+++ b/arch/powerpc/include/asm/ultravisor.h
@@ -61,6 +61,15 @@ static inline int uv_page_out(u64 lpid, u64 dst_ra, u64 src_gpa, u64 flags,
        return ucall(UV_PAGE_OUT, retbuf, lpid, dst_ra, src_gpa, flags,
                     page_shift);
 }
+
+static inline int uv_register_mem_slot(u64 lpid, u64 start_gpa, u64 size,
+                                      u64 flags, u64 slotid)
+{
+       unsigned long retbuf[UCALL_BUFSIZE];
+
+       return ucall(UV_REGISTER_MEM_SLOT, retbuf, lpid, start_gpa,
+                    size, flags, slotid);
+}
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_ULTRAVISOR_H */
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2918616198de..3683e517541f 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1098,6 +1098,13 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                                            kvmppc_get_gpr(vcpu, 5),
                                            kvmppc_get_gpr(vcpu, 6));
                break;
+       case H_SVM_INIT_START:
+               ret = kvmppc_h_svm_init_start(vcpu->kvm);
+               break;
+       case H_SVM_INIT_DONE:
+               ret = kvmppc_h_svm_init_done(vcpu->kvm);
+               break;
+
        default:
                return RESUME_HOST;
        }
diff --git a/arch/powerpc/kvm/book3s_hv_hmm.c b/arch/powerpc/kvm/book3s_hv_hmm.c
index 333829682f59..18fec814401d 100644
--- a/arch/powerpc/kvm/book3s_hv_hmm.c
+++ b/arch/powerpc/kvm/book3s_hv_hmm.c
@@ -55,6 +55,40 @@ struct kvmppc_hmm_migrate_args {
        unsigned long page_shift;
 };
 
+unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
+{
+       struct kvm_memslots *slots;
+       struct kvm_memory_slot *memslot;
+       int ret = H_SUCCESS;
+       int srcu_idx;
+
+       srcu_idx = srcu_read_lock(&kvm->srcu);
+       slots = kvm_memslots(kvm);
+       kvm_for_each_memslot(memslot, slots) {
+               ret = uv_register_mem_slot(kvm->arch.lpid,
+                                          memslot->base_gfn << PAGE_SHIFT,
+                                          memslot->npages * PAGE_SIZE,
+                                          0, memslot->id);
+               if (ret < 0) {
+                       ret = H_PARAMETER;
+                       goto out;
+               }
+       }
+       kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_START;
+out:
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+       return ret;
+}
+
+unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
+{
+       if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
+               return H_UNSUPPORTED;
+
+       kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
+       return H_SUCCESS;
+}
+
 #define KVMPPC_PFN_HMM         (0x1ULL << 61)
 
 static inline bool kvmppc_is_hmm_pfn(unsigned long pfn)
-- 
2.17.1
