On Wed, Nov 25, 2020 at 09:51:33AM +0100, Jan Beulich wrote:
> Except for the initial part of cstar_enter compat/entry.S is all dead
> code in this case. Further, along the lines of the PV conditionals we
> already have in entry.S, make code PV32-conditional there too (to a
> fair part because this code actually references compat/entry.S).
> 
> Signed-off-by: Jan Beulich <jbeul...@suse.com>
> ---
> TBD: I'm on the fence of whether (in a separate patch) to also make
>      conditional struct pv_domain's is_32bit field.
> 
> --- a/xen/arch/x86/x86_64/asm-offsets.c
> +++ b/xen/arch/x86/x86_64/asm-offsets.c
> @@ -9,7 +9,7 @@
>  #include <xen/perfc.h>
>  #endif
>  #include <xen/sched.h>
> -#ifdef CONFIG_PV
> +#ifdef CONFIG_PV32
>  #include <compat/xen.h>
>  #endif
>  #include <asm/hardirq.h>
> @@ -102,19 +102,21 @@ void __dummy__(void)
>      BLANK();
>  #endif
>  
> -#ifdef CONFIG_PV
> +#ifdef CONFIG_PV32
>      OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.pv.is_32bit);
>      BLANK();
>  
> -    OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
> -    OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask);
> -    BLANK();
> -
>      OFFSET(COMPAT_VCPUINFO_upcall_pending, struct compat_vcpu_info, evtchn_upcall_pending);
>      OFFSET(COMPAT_VCPUINFO_upcall_mask, struct compat_vcpu_info, evtchn_upcall_mask);
>      BLANK();
>  #endif
>  
> +#ifdef CONFIG_PV
> +    OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
> +    OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask);
> +    BLANK();
> +#endif
> +
>      OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
>      OFFSET(CPUINFO_verw_sel, struct cpu_info, verw_sel);
>      OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
> --- a/xen/arch/x86/x86_64/compat/entry.S
> +++ b/xen/arch/x86/x86_64/compat/entry.S
> @@ -29,8 +29,6 @@ ENTRY(entry_int82)
>          mov   %rsp, %rdi
>          call  do_entry_int82
>  
> -#endif /* CONFIG_PV32 */
> -
>  /* %rbx: struct vcpu */
>  ENTRY(compat_test_all_events)
>          ASSERT_NOT_IN_ATOMIC
> @@ -197,6 +195,8 @@ ENTRY(cr4_pv32_restore)
>          xor   %eax, %eax
>          ret
>  
> +#endif /* CONFIG_PV32 */

I've also wondered about this: it feels weird to add CONFIG_PV32 gates
to compat/entry.S, since that file is supposed to be used only when
there's support for 32bit PV guests.

Wouldn't this file only get built when such support is enabled?
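
To illustrate what I mean by relying on the build instead: a file-level
guard along the lines below (hypothetical sketch only; it assumes
compat/entry.S is pulled in from entry.S via the preprocessor, and if
it's instead built as its own object the equivalent would be a
Makefile-level guard). Per the patch description the initial part of
cstar_enter is still needed without PV32, so that bit would have to
move elsewhere first.

/* In entry.S -- hypothetical sketch, not a concrete proposal. */
#ifdef CONFIG_PV32
#include "compat/entry.S"
#endif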

> --- a/xen/arch/x86/x86_64/entry.S
> +++ b/xen/arch/x86/x86_64/entry.S
> @@ -328,8 +328,10 @@ UNLIKELY_END(sysenter_gpf)
>          movq  VCPU_domain(%rbx),%rdi
>          movq  %rax,TRAPBOUNCE_eip(%rdx)
>          movb  %cl,TRAPBOUNCE_flags(%rdx)
> +#ifdef CONFIG_PV32
>          cmpb  $0, DOMAIN_is_32bit_pv(%rdi)
>          jne   compat_sysenter
> +#endif
>          jmp   .Lbounce_exception
>  
>  ENTRY(int80_direct_trap)
> @@ -370,6 +372,7 @@ UNLIKELY_END(msi_check)
>          mov    0x80 * TRAPINFO_sizeof + TRAPINFO_eip(%rsi), %rdi
>          movzwl 0x80 * TRAPINFO_sizeof + TRAPINFO_cs (%rsi), %ecx
>  
> +#ifdef CONFIG_PV32
>          mov   %ecx, %edx
>          and   $~3, %edx
>  
> @@ -378,6 +381,10 @@ UNLIKELY_END(msi_check)
>  
>          test  %rdx, %rdx
>          jz    int80_slow_path
> +#else
> +        test  %rdi, %rdi
> +        jz    int80_slow_path
> +#endif
>  
>          /* Construct trap_bounce from trap_ctxt[0x80]. */
>          lea   VCPU_trap_bounce(%rbx), %rdx
> @@ -390,8 +397,10 @@ UNLIKELY_END(msi_check)
>          lea   (, %rcx, TBF_INTERRUPT), %ecx
>          mov   %cl, TRAPBOUNCE_flags(%rdx)
>  
> +#ifdef CONFIG_PV32
>          cmpb  $0, DOMAIN_is_32bit_pv(%rax)
>          jne   compat_int80_direct_trap
> +#endif
>  
>          call  create_bounce_frame
>          jmp   test_all_events
> @@ -541,12 +550,16 @@ ENTRY(dom_crash_sync_extable)
>          GET_STACK_END(ax)
>          leaq  STACK_CPUINFO_FIELD(guest_cpu_user_regs)(%rax),%rsp
>          # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
> +#ifdef CONFIG_PV32
>          movq  STACK_CPUINFO_FIELD(current_vcpu)(%rax), %rax
>          movq  VCPU_domain(%rax),%rax
>          cmpb  $0, DOMAIN_is_32bit_pv(%rax)
>          sete  %al
>          leal  (%rax,%rax,2),%eax
>          orb   %al,UREGS_cs(%rsp)
> +#else
> +        orb   $3, UREGS_cs(%rsp)
> +#endif
>          xorl  %edi,%edi
>          jmp   asm_domain_crash_synchronous /* Does not return */
>          .popsection
> @@ -562,11 +575,15 @@ ENTRY(ret_from_intr)
>          GET_CURRENT(bx)
>          testb $3, UREGS_cs(%rsp)
>          jz    restore_all_xen
> +#ifdef CONFIG_PV32
>          movq  VCPU_domain(%rbx), %rax
>          cmpb  $0, DOMAIN_is_32bit_pv(%rax)
>          je    test_all_events
>          jmp   compat_test_all_events
>  #else
> +        jmp   test_all_events
> +#endif
> +#else
>          ASSERT_CONTEXT_IS_XEN
>          jmp   restore_all_xen
>  #endif
> @@ -652,7 +669,7 @@ handle_exception_saved:
>          testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
>          jz    exception_with_ints_disabled
>  
> -#ifdef CONFIG_PV
> +#if defined(CONFIG_PV32)
>          ALTERNATIVE_2 "jmp .Lcr4_pv32_done", \
>              __stringify(mov VCPU_domain(%rbx), %rax), X86_FEATURE_XEN_SMEP, \
>              __stringify(mov VCPU_domain(%rbx), %rax), X86_FEATURE_XEN_SMAP
> @@ -692,7 +709,7 @@ handle_exception_saved:
>          test  $~(PFEC_write_access|PFEC_insn_fetch),%eax
>          jz    compat_test_all_events
>  .Lcr4_pv32_done:
> -#else
> +#elif !defined(CONFIG_PV)
>          ASSERT_CONTEXT_IS_XEN
>  #endif /* CONFIG_PV */
>          sti
> @@ -711,9 +728,11 @@ handle_exception_saved:
>  #ifdef CONFIG_PV
>          testb $3,UREGS_cs(%rsp)
>          jz    restore_all_xen
> +#ifdef CONFIG_PV32
>          movq  VCPU_domain(%rbx),%rax
>          cmpb  $0, DOMAIN_is_32bit_pv(%rax)
>          jne   compat_test_all_events
> +#endif
>          jmp   test_all_events
>  #else
>          ASSERT_CONTEXT_IS_XEN
> @@ -947,11 +966,16 @@ handle_ist_exception:
>          je    1f
>          movl  $EVENT_CHECK_VECTOR,%edi
>          call  send_IPI_self
> -1:      movq  VCPU_domain(%rbx),%rax
> +1:
> +#ifdef CONFIG_PV32
> +        movq  VCPU_domain(%rbx),%rax
>          cmpb  $0,DOMAIN_is_32bit_pv(%rax)
>          je    restore_all_guest
>          jmp   compat_restore_all_guest
>  #else
> +        jmp   restore_all_guest
> +#endif
> +#else
>          ASSERT_CONTEXT_IS_XEN
>          jmp   restore_all_xen
>  #endif

I would like to have Andrew's opinion on this one (as you and he tend
to modify more asm code than I do). There are quite a lot of additions
to the assembly code, and IMO they make the code more complex, which I
think we should try to avoid, as assembly is already hard enough.
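
To make the pattern concrete: all of these assembly hunks encode the
same single C-level test, which in a C file would need no #ifdef at the
use sites because the compiler folds the constant away. A rough,
self-contained sketch (simplified stand-ins, not the hypervisor's real
types or helpers):

/* Illustrative only -- simplified stand-ins, not Xen's real helpers. */
#include <stdbool.h>

#define PV32 0                       /* stand-in for CONFIG_PV32=n */

struct demo_domain { bool is_32bit_pv; };

bool needs_compat_path(const struct demo_domain *d)
{
    /* With PV32 == 0 the compiler folds this to "return false",
     * so no #ifdef is needed at the use sites. */
    return PV32 && d->is_32bit_pv;
}

Assembly has no equivalent way of letting the compiler discard the dead
branch, which is why every use site above grows an explicit #ifdef (or
an #else with the unconditional jump).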

Thanks, Roger.
