On 3/17/21 5:22 PM, Fabiano Rosas wrote:
> Nicholas Piggin <npig...@gmail.com> writes:
> 
>> In the interest of minimising the amount of code that is run in
>> "real-mode", don't handle hcalls in real mode in the P9 path.
>>
>> On POWER8 and earlier, it is much more expensive to exit from HV real
>> mode and switch to host mode, because on those processors HV
>> interrupts reach the hypervisor with the MMU off, the other threads in
>> the core need to be pulled out of the guest, and all SLBs need to be
>> saved, ERATs invalidated, and the host SLB reloaded before the MMU is
>> re-enabled in host mode. Hash guests also require many hcalls to run,
>> and the XICS interrupt controller is driven by hcalls.
>>
>> By contrast, POWER9 has independent thread switching, and in radix mode
>> the hypervisor is already in a host virtual memory mode when the HV
>> interrupt is taken. Radix + xive guests don't need hcalls to handle
>> interrupts or manage translations.
>>
>> So it's much less important to handle hcalls in real mode in P9.
>>
>> Signed-off-by: Nicholas Piggin <npig...@gmail.com>
>> ---
> 
> <snip>
> 
>> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
>> index 497f216ad724..1f2ba8955c6a 100644
>> --- a/arch/powerpc/kvm/book3s_hv.c
>> +++ b/arch/powerpc/kvm/book3s_hv.c
>> @@ -1147,7 +1147,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
>>   * This has to be done early, not in kvmppc_pseries_do_hcall(), so
>>   * that the cede logic in kvmppc_run_single_vcpu() works properly.
>>   */
>> -static void kvmppc_nested_cede(struct kvm_vcpu *vcpu)
>> +static void kvmppc_cede(struct kvm_vcpu *vcpu)
> 
> The comment above needs to be updated I think.
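> 
> Maybe something along these lines (just a wording suggestion, since
> the function is no longer nested-only):
> 
>  * Handle H_CEDE when the real-mode hcall handlers have not been run
>  * (nested guests, and now also the P9 path).
>  * This has to be done early, not in kvmppc_pseries_do_hcall(), so
>  * that the cede logic in kvmppc_run_single_vcpu() works properly.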
> 
>>  {
>>      vcpu->arch.shregs.msr |= MSR_EE;
>>      vcpu->arch.ceded = 1;
>> @@ -1403,9 +1403,15 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
>>              /* hcall - punt to userspace */
>>              int i;
>>
>> -            /* hypercall with MSR_PR has already been handled in rmode,
>> -             * and never reaches here.
>> -             */
>> +            if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) {
>> +                    /*
>> +                     * Guest userspace executed sc 1, reflect it back as a
>> +                     * privileged program check interrupt.
>> +                     */
>> +                    kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
>> +                    r = RESUME_GUEST;
>> +                    break;
>> +            }
>>
>>              run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
>>              for (i = 0; i < 9; ++i)
>> @@ -3740,15 +3746,36 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
>>              /* H_CEDE has to be handled now, not later */
>>              if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
>>                  kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
>> -                    kvmppc_nested_cede(vcpu);
>> +                    kvmppc_cede(vcpu);
>>                      kvmppc_set_gpr(vcpu, 3, 0);
>>                      trap = 0;
>>              }
>>      } else {
>>              kvmppc_xive_push_vcpu(vcpu);
>>              trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit, lpcr);
>> -            kvmppc_xive_pull_vcpu(vcpu);
>> +            /* H_CEDE has to be handled now, not later */
>> +            /* XICS hcalls must be handled before xive is pulled */
>> +            if (trap == BOOK3S_INTERRUPT_SYSCALL &&
>> +                !(vcpu->arch.shregs.msr & MSR_PR)) {
>> +                    unsigned long req = kvmppc_get_gpr(vcpu, 3);
>>
>> +                    if (req == H_CEDE) {
>> +                            kvmppc_cede(vcpu);
>> +                            kvmppc_xive_cede_vcpu(vcpu); /* may un-cede */
>> +                            kvmppc_set_gpr(vcpu, 3, 0);
>> +                            trap = 0;
>> +                    }
>> +                    if (req == H_EOI || req == H_CPPR ||
>> +                        req == H_IPI || req == H_IPOLL ||
>> +                        req == H_XIRR || req == H_XIRR_X) {
>> +                            unsigned long ret;
>> +
>> +                            ret = kvmppc_xive_xics_hcall(vcpu, req);
>> +                            kvmppc_set_gpr(vcpu, 3, ret);
>> +                            trap = 0;
>> +                    }
>> +            }
> 
> I tried running L2 with xive=off and this code slows down the boot
> considerably. I think we're missing a !vcpu->arch.nested in the
> conditional.
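> 
> i.e. something like this (untested sketch):
> 
>             if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
>                 !(vcpu->arch.shregs.msr & MSR_PR)) {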

By default, L2 will always use the XIVE emulation in QEMU. If you
deactivate XIVE support in the L2, with "xive=off" on the OS command
line or with "ic-mode=xics" in the L1 QEMU, it will use the legacy XICS
mode, emulated in the L1 KVM-on-pseries.
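
For instance (from memory, the exact options may vary):

  # from the L1, force the legacy XICS mode for the L2 guest
  qemu-system-ppc64 -M pseries,ic-mode=xics ...

  # or disable XIVE in the L2 kernel instead
  qemu-system-ppc64 ... -append "xive=off"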

And yes, the QEMU XIVE emulation tends to be faster. I don't know
exactly why. Probably because of fewer exits/entries?

C.


> This may also be missing these checks from kvmppc_pseries_do_hcall:
> 
>               if (kvmppc_xics_enabled(vcpu)) {
>                       if (xics_on_xive()) {
>                               ret = H_NOT_AVAILABLE;
>                               return RESUME_GUEST;
>                       }
>                       ret = kvmppc_xics_hcall(vcpu, req);
>                         (...)
> 
> For H_CEDE there might be a similar situation, since by setting trap
> to 0 here we're shadowing the code above that runs after
> H_ENTER_NESTED.
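> 
> So the fast path could perhaps gate on the XICS check first, roughly
> (untested):
> 
>                     if (kvmppc_xics_enabled(vcpu) &&
>                         (req == H_EOI || req == H_CPPR ||
>                          req == H_IPI || req == H_IPOLL ||
>                          req == H_XIRR || req == H_XIRR_X)) {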
> 
>> +            kvmppc_xive_pull_vcpu(vcpu);
>>      }
>>
>>      vcpu->arch.slb_max = 0;
>> @@ -4408,8 +4435,11 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
>>              else
>>                      r = kvmppc_run_vcpu(vcpu);
>>
>> -            if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
>> -                !(vcpu->arch.shregs.msr & MSR_PR)) {
>> +            if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
>> +                    if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) {
>> +                            r = RESUME_GUEST;
>> +                            continue;
>> +                    }
>>                      trace_kvm_hcall_enter(vcpu);
>>                      r = kvmppc_pseries_do_hcall(vcpu);
>>                      trace_kvm_hcall_exit(vcpu, r);
>> diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
>> index c11597f815e4..2d0d14ed1d92 100644
>> --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
>> +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
>> @@ -1397,9 +1397,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
>>      mr      r4,r9
>>      bge     fast_guest_return
>>  2:
>> +    /* If we came in through the P9 short path, no real mode hcalls */
>> +    lwz     r0, STACK_SLOT_SHORT_PATH(r1)
>> +    cmpwi   r0, 0
>> +    bne     no_try_real
>>      /* See if this is an hcall we can handle in real mode */
>>      cmpwi   r12,BOOK3S_INTERRUPT_SYSCALL
>>      beq     hcall_try_real_mode
>> +no_try_real:
>>
>>      /* Hypervisor doorbell - exit only if host IPI flag set */
>>      cmpwi   r12, BOOK3S_INTERRUPT_H_DOORBELL
>> diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
>> index 52cdb9e2660a..1e4871bbcad4 100644
>> --- a/arch/powerpc/kvm/book3s_xive.c
>> +++ b/arch/powerpc/kvm/book3s_xive.c
>> @@ -158,6 +158,40 @@ void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu)
>>  }
>>  EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);
>>
>> +void kvmppc_xive_cede_vcpu(struct kvm_vcpu *vcpu)
>> +{
>> +    void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr;
>> +
>> +    if (!esc_vaddr)
>> +            return;
>> +
>> +    /* we are using XIVE with single escalation */
>> +
>> +    if (vcpu->arch.xive_esc_on) {
>> +            /*
>> +             * If we still have a pending escalation, abort the cede,
>> +             * and we must set PQ to 10 rather than 00 so that we don't
>> +             * potentially end up with two entries for the escalation
>> +             * interrupt in the XIVE interrupt queue.  In that case
>> +             * we also don't want to set xive_esc_on to 1 here in
>> +             * case we race with xive_esc_irq().
>> +             */
>> +            vcpu->arch.ceded = 0;
>> +            /*
>> +             * The escalation interrupts are special as we don't EOI them.
>> +             * There is no need to use the load-after-store ordering offset
>> +             * to set PQ to 10 as we won't use StoreEOI.
>> +             */
>> +            __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_10);
>> +    } else {
>> +            vcpu->arch.xive_esc_on = true;
>> +            mb();
>> +            __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00);
>> +    }
>> +    mb();
>> +}
>> +EXPORT_SYMBOL_GPL(kvmppc_xive_cede_vcpu);
>> +
>>  /*
>>   * This is a simple trigger for a generic XIVE IRQ. This must
>>   * only be called for interrupts that support a trigger page
>> @@ -2106,6 +2140,32 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
>>      return 0;
>>  }
>>
>> +int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
>> +{
>> +    struct kvmppc_vcore *vc = vcpu->arch.vcore;
>> +
>> +    switch (req) {
>> +    case H_XIRR:
>> +            return xive_vm_h_xirr(vcpu);
>> +    case H_CPPR:
>> +            return xive_vm_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
>> +    case H_EOI:
>> +            return xive_vm_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
>> +    case H_IPI:
>> +            return xive_vm_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
>> +                                      kvmppc_get_gpr(vcpu, 5));
>> +    case H_IPOLL:
>> +            return xive_vm_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
>> +    case H_XIRR_X:
>> +            xive_vm_h_xirr(vcpu);
>> +            kvmppc_set_gpr(vcpu, 5, get_tb() + vc->tb_offset);
>> +            return H_SUCCESS;
>> +    }
>> +
>> +    return H_UNSUPPORTED;
>> +}
>> +EXPORT_SYMBOL_GPL(kvmppc_xive_xics_hcall);
>> +
>>  int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
>>  {
>>      struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
