On Sat, Sep 4, 2021 at 5:56 PM Philippe Mathieu-Daudé <f4...@amsat.org> wrote:
> Following the logic of commit 30493a030ff ("i386: split seg_helper
> into user-only and sysemu parts"), move x86_cpu_exec_interrupt()
> under sysemu/seg_helper.c.
>
> Signed-off-by: Philippe Mathieu-Daudé <f4...@amsat.org>
> ---
> I prefer to not squash this into the previous patch because the
> ifdef'ry removal (in previous patch) is not trivial IMO.
> ---
>  target/i386/tcg/seg_helper.c        | 64 ----------------------------
>  target/i386/tcg/sysemu/seg_helper.c | 65 +++++++++++++++++++++++++++++
>  2 files changed, 65 insertions(+), 64 deletions(-)
>

Reviewed-By: Warner Losh <i...@bsdimp.com>

> diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c
> index 13c6e6ee62e..baa905a0cd6 100644
> --- a/target/i386/tcg/seg_helper.c
> +++ b/target/i386/tcg/seg_helper.c
> @@ -1110,70 +1110,6 @@ void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
>      do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
>  }
>
> -#ifndef CONFIG_USER_ONLY
> -bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
> -{
> -    X86CPU *cpu = X86_CPU(cs);
> -    CPUX86State *env = &cpu->env;
> -    int intno;
> -
> -    interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
> -    if (!interrupt_request) {
> -        return false;
> -    }
> -
> -    /* Don't process multiple interrupt requests in a single call.
> -     * This is required to make icount-driven execution deterministic.
> -     */
> -    switch (interrupt_request) {
> -    case CPU_INTERRUPT_POLL:
> -        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
> -        apic_poll_irq(cpu->apic_state);
> -        break;
> -    case CPU_INTERRUPT_SIPI:
> -        do_cpu_sipi(cpu);
> -        break;
> -    case CPU_INTERRUPT_SMI:
> -        cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
> -        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
> -        do_smm_enter(cpu);
> -        break;
> -    case CPU_INTERRUPT_NMI:
> -        cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
> -        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
> -        env->hflags2 |= HF2_NMI_MASK;
> -        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
> -        break;
> -    case CPU_INTERRUPT_MCE:
> -        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
> -        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
> -        break;
> -    case CPU_INTERRUPT_HARD:
> -        cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
> -        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
> -                                   CPU_INTERRUPT_VIRQ);
> -        intno = cpu_get_pic_interrupt(env);
> -        qemu_log_mask(CPU_LOG_TB_IN_ASM,
> -                      "Servicing hardware INT=0x%02x\n", intno);
> -        do_interrupt_x86_hardirq(env, intno, 1);
> -        break;
> -    case CPU_INTERRUPT_VIRQ:
> -        /* FIXME: this should respect TPR */
> -        cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
> -        intno = x86_ldl_phys(cs, env->vm_vmcb
> -                             + offsetof(struct vmcb, control.int_vector));
> -        qemu_log_mask(CPU_LOG_TB_IN_ASM,
> -                      "Servicing virtual hardware INT=0x%02x\n", intno);
> -        do_interrupt_x86_hardirq(env, intno, 1);
> -        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
> -        break;
> -    }
> -
> -    /* Ensure that no TB jump will be modified as the program flow was changed. */
> -    return true;
> -}
> -#endif /* CONFIG_USER_ONLY */
> -
>  void helper_lldt(CPUX86State *env, int selector)
>  {
>      SegmentCache *dt;
> diff --git a/target/i386/tcg/sysemu/seg_helper.c b/target/i386/tcg/sysemu/seg_helper.c
> index 82c0856c417..b425b930f9d 100644
> --- a/target/i386/tcg/sysemu/seg_helper.c
> +++ b/target/i386/tcg/sysemu/seg_helper.c
> @@ -125,6 +125,71 @@ void x86_cpu_do_interrupt(CPUState *cs)
>      }
>  }
>
> +bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
> +{
> +    X86CPU *cpu = X86_CPU(cs);
> +    CPUX86State *env = &cpu->env;
> +    int intno;
> +
> +    interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
> +    if (!interrupt_request) {
> +        return false;
> +    }
> +
> +    /*
> +     * Don't process multiple interrupt requests in a single call.
> +     * This is required to make icount-driven execution deterministic.
> +     */
> +    switch (interrupt_request) {
> +    case CPU_INTERRUPT_POLL:
> +        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
> +        apic_poll_irq(cpu->apic_state);
> +        break;
> +    case CPU_INTERRUPT_SIPI:
> +        do_cpu_sipi(cpu);
> +        break;
> +    case CPU_INTERRUPT_SMI:
> +        cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
> +        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
> +        do_smm_enter(cpu);
> +        break;
> +    case CPU_INTERRUPT_NMI:
> +        cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
> +        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
> +        env->hflags2 |= HF2_NMI_MASK;
> +        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
> +        break;
> +    case CPU_INTERRUPT_MCE:
> +        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
> +        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
> +        break;
> +    case CPU_INTERRUPT_HARD:
> +        cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
> +        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
> +                                   CPU_INTERRUPT_VIRQ);
> +        intno = cpu_get_pic_interrupt(env);
> +        qemu_log_mask(CPU_LOG_TB_IN_ASM,
> +                      "Servicing hardware INT=0x%02x\n", intno);
> +        do_interrupt_x86_hardirq(env, intno, 1);
> +        break;
> +    case CPU_INTERRUPT_VIRQ:
> +        /* FIXME: this should respect TPR */
> +        cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
> +        intno = x86_ldl_phys(cs, env->vm_vmcb
> +                             + offsetof(struct vmcb, control.int_vector));
> +        qemu_log_mask(CPU_LOG_TB_IN_ASM,
> +                      "Servicing virtual hardware INT=0x%02x\n", intno);
> +        do_interrupt_x86_hardirq(env, intno, 1);
> +        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
> +        break;
> +    }
> +
> +    /*
> +     * Ensure that no TB jump will be modified as the program flow was changed.
> +     */
> +    return true;
> +}
> +
>  /* check if Port I/O is allowed in TSS */
>  void helper_check_io(CPUX86State *env, uint32_t addr, uint32_t size)
>  {
> --
> 2.31.1
>
>
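For anyone skimming the thread: the moved function is only reached through the TCG cpu_exec_interrupt hook, which only the system-emulation build installs, so nothing in the user-only build needs it once it lives under sysemu/. Roughly, the wiring looks like the sketch below. This is not part of the patch and not a quote of target/i386/tcg/tcg-cpu.c; TCGCPUOps and its cpu_exec_interrupt/do_interrupt members come from include/hw/core/tcg-cpu-ops.h, but the members shown and the guard placement here are my illustrative assumptions.

/*
 * Sketch only, not from this patch: how a sysemu-only hook is typically
 * registered.  Member names follow include/hw/core/tcg-cpu-ops.h; other
 * hooks are elided and the exact guard placement is an assumption.
 */
#include "hw/core/tcg-cpu-ops.h"

static const struct TCGCPUOps x86_tcg_ops = {
    /* ... translation hooks elided for brevity ... */
#ifndef CONFIG_USER_ONLY
    /* Hardware interrupt servicing only exists in system emulation. */
    .cpu_exec_interrupt = x86_cpu_exec_interrupt,
    .do_interrupt = x86_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};

Keeping the registration and the implementation behind the same CONFIG_USER_ONLY condition is what lets the common seg_helper.c stay free of the ifdef'ry mentioned above.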