When confidential-guest-support is enabled for the virt machine, call the RME init function, and add the RME flag to the VM type.
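For reference, enabling this on the command line would look something like

  qemu-system-aarch64 -M virt,confidential-guest-support=rme0 \
          -object rme-guest,id=rme0 ...

where the rme-guest object is the one defined elsewhere in this series; only the
confidential-guest-support machine property is relevant to this patch.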
* The Realm differentiates non-secure from realm memory using the upper
  GPA bit. Reserve that bit when creating the memory map, to make sure
  that device MMIO located in high memory can still fit (a standalone
  sketch of this arithmetic follows the patch).

* pvtime is disabled for the moment. Since the hypervisor has to write
  into the shared pvtime page before scheduling a vcpu, it seems
  incompatible with confidential guests.

Signed-off-by: Jean-Philippe Brucker <jean-phili...@linaro.org>
---
 hw/arm/virt.c | 48 ++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 44 insertions(+), 4 deletions(-)

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index b871350856..df613e634a 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -210,6 +210,11 @@ static const char *valid_cpus[] = {
     ARM_CPU_TYPE_NAME("max"),
 };
 
+static bool virt_machine_is_confidential(VirtMachineState *vms)
+{
+    return MACHINE(vms)->cgs;
+}
+
 static bool cpu_type_valid(const char *cpu)
 {
     int i;
@@ -247,6 +252,14 @@ static void create_fdt(VirtMachineState *vms)
         exit(1);
     }
 
+    /*
+     * Since the devicetree is included in the initial measurement, it must
+     * not contain random data.
+     */
+    if (virt_machine_is_confidential(vms)) {
+        vms->dtb_randomness = false;
+    }
+
     ms->fdt = fdt;
 
     /* Header */
@@ -1924,6 +1937,15 @@ static void virt_cpu_post_init(VirtMachineState *vms, MemoryRegion *sysmem)
     steal_time = object_property_get_bool(OBJECT(first_cpu),
                                           "kvm-steal-time", NULL);
 
+    if (virt_machine_is_confidential(vms)) {
+        /*
+         * The host cannot write into a confidential guest's memory until the
+         * guest shares it. Since the host writes the pvtime region before the
+         * guest gets a chance to set it up, disable pvtime.
+         */
+        steal_time = false;
+    }
+
     if (kvm_enabled()) {
         hwaddr pvtime_reg_base = vms->memmap[VIRT_PVTIME].base;
         hwaddr pvtime_reg_size = vms->memmap[VIRT_PVTIME].size;
@@ -2053,10 +2075,11 @@ static void machvirt_init(MachineState *machine)
      * if the guest has EL2 then we will use SMC as the conduit,
      * and otherwise we will use HVC (for backwards compatibility and
      * because if we're using KVM then we must use HVC).
+     * Realm guests must also use SMC.
      */
     if (vms->secure && firmware_loaded) {
         vms->psci_conduit = QEMU_PSCI_CONDUIT_DISABLED;
-    } else if (vms->virt) {
+    } else if (vms->virt || virt_machine_is_confidential(vms)) {
         vms->psci_conduit = QEMU_PSCI_CONDUIT_SMC;
     } else {
         vms->psci_conduit = QEMU_PSCI_CONDUIT_HVC;
     }
@@ -2102,6 +2125,8 @@ static void machvirt_init(MachineState *machine)
         exit(1);
     }
 
+    kvm_arm_rme_init(machine->cgs, &error_fatal);
+
     create_fdt(vms);
 
     assert(possible_cpus->len == max_cpus);
@@ -2854,15 +2879,26 @@ static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine,
 static int virt_kvm_type(MachineState *ms, const char *type_str)
 {
     VirtMachineState *vms = VIRT_MACHINE(ms);
+    int rme_vm_type = kvm_arm_rme_vm_type(ms);
     int max_vm_pa_size, requested_pa_size;
+    int rme_reserve_bit = 0;
     bool fixed_ipa;
 
-    max_vm_pa_size = kvm_arm_get_max_vm_ipa_size(ms, &fixed_ipa);
+    if (rme_vm_type) {
+        /*
+         * With RME, the upper GPA bit differentiates Realm from NS memory.
+         * Reserve the upper bit to guarantee that highmem devices will fit.
+         */
+        rme_reserve_bit = 1;
+    }
+
+    max_vm_pa_size = kvm_arm_get_max_vm_ipa_size(ms, &fixed_ipa) -
+                     rme_reserve_bit;
 
     /* we freeze the memory map to compute the highest gpa */
     virt_set_memmap(vms, max_vm_pa_size);
 
-    requested_pa_size = 64 - clz64(vms->highest_gpa);
+    requested_pa_size = 64 - clz64(vms->highest_gpa) + rme_reserve_bit;
 
     /*
      * KVM requires the IPA size to be at least 32 bits.
@@ -2883,7 +2919,11 @@ static int virt_kvm_type(MachineState *ms, const char *type_str)
      * the implicit legacy 40b IPA setting, in which case the kvm_type
      * must be 0.
      */
-    return fixed_ipa ? 0 : requested_pa_size;
+    if (fixed_ipa) {
+        return 0;
+    }
+
+    return requested_pa_size | rme_vm_type;
 }
 
 static void virt_machine_class_init(ObjectClass *oc, void *data)
-- 
2.39.0
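The reserved-bit arithmetic, as a standalone sketch with assumed numbers:
example_kvm_type(), host_ipa and highest_gpa_bits are illustrative names
standing in for kvm_arm_get_max_vm_ipa_size(), virt_set_memmap() and the
clz64() computation above, and the fixed_ipa special case is left out.

static int example_kvm_type(int host_ipa, int rme_vm_type)
{
    /* The top GPA bit selects Realm vs NS memory, so keep it out of the map */
    int rme_reserve_bit = rme_vm_type ? 1 : 0;
    int max_vm_pa_size = host_ipa - rme_reserve_bit;

    /*
     * virt_set_memmap() lays out RAM and high MMIO below 2^max_vm_pa_size;
     * assume the resulting highest GPA uses all of those bits.
     */
    int highest_gpa_bits = max_vm_pa_size;

    /* Add the reserved bit back and OR the Realm VM type flag into the result */
    return (highest_gpa_bits + rme_reserve_bit) | rme_vm_type;
}

With a 48-bit host IPA, for example, the memory map is built for 47 bits of
GPA, while KVM is still asked for the full 48-bit IPA with the Realm flag
OR'd into the VM type.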