On 08/01/15 15:22, Jan Beulich wrote:
> - drop clearing of excessive multicall arguments in compat case (no
>   longer needed now that hypercall_xlat_continuation() only checks the
>   actual arguments)
> - latch current into a local variable
> - use the cached value of hvm_guest_x86_mode() instead of re-executing
>   it
> - scope restrict "regs"
> - while at it, convert the remaining two argument checking BUG_ON()s in
>   hypercall_xlat_continuation() to ASSERT()s
>
> Signed-off-by: Jan Beulich <jbeul...@suse.com>

Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>

There appear to be many other places which could benefit from caching
hvm_guest_x86_mode() (especially in the nested virt case).  Is it worth
considering calculating it unconditionally on vmexit and removing the
function?  A rough sketch of what I mean is below.

>
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -1675,7 +1675,6 @@ unsigned long hypercall_create_continuat
>      unsigned int op, const char *format, ...)
>  {
>      struct mc_state *mcs = &current->mc_state;
> -    struct cpu_user_regs *regs;
>      const char *p = format;
>      unsigned long arg;
>      unsigned int i;
> @@ -1689,26 +1688,23 @@ unsigned long hypercall_create_continuat
>  
>          for ( i = 0; *p != '\0'; i++ )
>              mcs->call.args[i] = next_arg(p, args);
> -        if ( is_pv_32on64_domain(current->domain) )
> -        {
> -            for ( ; i < 6; i++ )
> -                mcs->call.args[i] = 0;
> -        }
>      }
>      else
>      {
> -        regs       = guest_cpu_user_regs();
> -        regs->eax  = op;
> +        struct cpu_user_regs *regs = guest_cpu_user_regs();
> +        struct vcpu *curr = current;
> +
> +        regs->eax = op;
>  
>          /* Ensure the hypercall trap instruction is re-executed. */
> -        if ( is_pv_vcpu(current) )
> +        if ( is_pv_vcpu(curr) )
>              regs->eip -= 2;  /* re-execute 'syscall' / 'int $xx' */
>          else
> -            current->arch.hvm_vcpu.hcall_preempted = 1;
> +            curr->arch.hvm_vcpu.hcall_preempted = 1;
>  
> -        if ( is_pv_vcpu(current) ?
> -             !is_pv_32on64_vcpu(current) :
> -             (hvm_guest_x86_mode(current) == 8) )
> +        if ( is_pv_vcpu(curr) ?
> +             !is_pv_32on64_vcpu(curr) :
> +             curr->arch.hvm_vcpu.hcall_64bit )
>          {
>              for ( i = 0; *p != '\0'; i++ )
>              {
> @@ -1759,9 +1755,8 @@ int hypercall_xlat_continuation(unsigned
>  
>      ASSERT(nr <= ARRAY_SIZE(mcs->call.args));
>      ASSERT(!(mask >> nr));
> -
> -    BUG_ON(id && *id >= nr);
> -    BUG_ON(id && (mask & (1U << *id)));
> +    ASSERT(!id || *id < nr);
> +    ASSERT(!id || !(mask & (1U << *id)));
>  
>      va_start(args, mask);
>  

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
