On Wed, Nov 25, 2020 at 09:51:33AM +0100, Jan Beulich wrote: > Except for the initial part of cstar_enter compat/entry.S is all dead > code in this case. Further, along the lines of the PV conditionals we > already have in entry.S, make code PV32-conditional there too (to a > fair part because this code actually references compat/entry.S). > > Signed-off-by: Jan Beulich <jbeul...@suse.com> > --- > TBD: I'm on the fence of whether (in a separate patch) to also make > conditional struct pv_domain's is_32bit field. > > --- a/xen/arch/x86/x86_64/asm-offsets.c > +++ b/xen/arch/x86/x86_64/asm-offsets.c > @@ -9,7 +9,7 @@ > #include <xen/perfc.h> > #endif > #include <xen/sched.h> > -#ifdef CONFIG_PV > +#ifdef CONFIG_PV32 > #include <compat/xen.h> > #endif > #include <asm/hardirq.h> > @@ -102,19 +102,21 @@ void __dummy__(void) > BLANK(); > #endif > > -#ifdef CONFIG_PV > +#ifdef CONFIG_PV32 > OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.pv.is_32bit); > BLANK(); > > - OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending); > - OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask); > - BLANK(); > - > OFFSET(COMPAT_VCPUINFO_upcall_pending, struct compat_vcpu_info, > evtchn_upcall_pending); > OFFSET(COMPAT_VCPUINFO_upcall_mask, struct compat_vcpu_info, > evtchn_upcall_mask); > BLANK(); > #endif > > +#ifdef CONFIG_PV > + OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending); > + OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask); > + BLANK(); > +#endif > + > OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, > guest_cpu_user_regs); > OFFSET(CPUINFO_verw_sel, struct cpu_info, verw_sel); > OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu); > --- a/xen/arch/x86/x86_64/compat/entry.S > +++ b/xen/arch/x86/x86_64/compat/entry.S > @@ -29,8 +29,6 @@ ENTRY(entry_int82) > mov %rsp, %rdi > call do_entry_int82 > > -#endif /* CONFIG_PV32 */ > - > /* %rbx: struct vcpu */ > ENTRY(compat_test_all_events) > 
ASSERT_NOT_IN_ATOMIC > @@ -197,6 +195,8 @@ ENTRY(cr4_pv32_restore) > xor %eax, %eax > ret > > +#endif /* CONFIG_PV32 */ > + > .section .text.entry, "ax", @progbits > > /* See lstar_enter for entry register state. */ > @@ -230,6 +230,13 @@ ENTRY(cstar_enter) > sti > > movq STACK_CPUINFO_FIELD(current_vcpu)(%rbx), %rbx > + > +#ifndef CONFIG_PV32 > + > + jmp switch_to_kernel > + > +#else > + > movq VCPU_domain(%rbx),%rcx > cmpb $0,DOMAIN_is_32bit_pv(%rcx) > je switch_to_kernel > @@ -393,3 +400,5 @@ compat_crash_page_fault: > jmp .Lft14 > .previous > _ASM_EXTABLE(.Lft14, .Lfx14) > + > +#endif /* CONFIG_PV32 */
Seeing this chunk, would it make sense to move the cstar_enter part relevant to !is_32bit_pv into the common entry.S and leave the rest here as compat_cstar_enter or some such? AFAICT we only need a tiny part of the compat stuff when !CONFIG_PV32, so I think we could make the whole compat/entry.S depend on CONFIG_PV32. Thanks, Roger.