> atomic_mb_set can be a little faster on x86, so:
>
> diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
> index dfba5ebd29..4452cd9856 100644
> --- a/accel/tcg/cpu-exec.c
> +++ b/accel/tcg/cpu-exec.c
> @@ -528,12 +528,10 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
>
>      /* Clear the interrupt flag now since we're processing
>       * cpu->interrupt_request and cpu->exit_request.
> +     * Ensure zeroing happens before reading cpu->exit_request or
> +     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
>       */
> -    atomic_set(&cpu->icount_decr.u16.high, 0);
> -    /* Ensure zeroing happens before reading cpu->exit_request or
> -     * cpu->interrupt_request. (also see cpu_exit())
> -     */
> -    smp_mb();
> +    atomic_mb_set(&cpu->icount_decr.u16.high, 0);
>
>      if (unlikely(atomic_read(&cpu->interrupt_request))) {
>          int interrupt_request;
>
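FWIW, the reason the combined call can be faster: IIRC QEMU's
include/qemu/atomic.h implements atomic_mb_set() via atomic_xchg() on
x86, so the store and the full barrier collapse into a single locked
xchg instruction, whereas atomic_set() + smp_mb() compiles to a plain
store followed by an mfence. A rough standalone sketch of the two
patterns in C11 atomics (illustrative only -- the names below are made
up and are not QEMU's actual primitives):

/* Sketch only: "flag" stands in for cpu->icount_decr.u16.high. */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint16_t flag;

/* atomic_set() + smp_mb(): relaxed store, then a full fence
 * (mov + mfence on x86). */
static void store_then_fence(void)
{
    atomic_store_explicit(&flag, 0, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);
}

/* atomic_mb_set() on x86: one atomic exchange (lock xchg), which acts
 * as both the store and the full barrier. */
static void store_via_exchange(void)
{
    (void)atomic_exchange_explicit(&flag, 0, memory_order_seq_cst);
}

Either way, the zeroing is ordered before the subsequent reads of
cpu->exit_request / cpu->interrupt_request, which is what the comment
in the patch requires.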
Looks good to me! Thanks!

-- 
Thanks,

David / dhildenb