This ensures that preempt_count will be accounted correctly during in-hypervisor context switches.
Signed-off-by: Volodymyr Babchuk <volodymyr_babc...@epam.com>
---
 xen/common/preempt.c      | 6 +++---
 xen/include/xen/preempt.h | 9 ++++-----
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/xen/common/preempt.c b/xen/common/preempt.c
index 3b4178fd44..ad61c8419a 100644
--- a/xen/common/preempt.c
+++ b/xen/common/preempt.c
@@ -23,17 +23,17 @@
 #include <xen/irq.h>
 #include <asm/system.h>
 
-DEFINE_PER_CPU(unsigned int, __preempt_count);
+DEFINE_PER_CPU(atomic_t, __preempt_count);
 
 bool_t in_atomic(void)
 {
-    return preempt_count() || in_irq() || !local_irq_is_enabled();
+    return atomic_read(&preempt_count()) || in_irq() || !local_irq_is_enabled();
 }
 
 #ifndef NDEBUG
 void ASSERT_NOT_IN_ATOMIC(void)
 {
-    ASSERT(!preempt_count());
+    ASSERT(!atomic_read(&preempt_count()));
     ASSERT(!in_irq());
     ASSERT(local_irq_is_enabled());
 }
diff --git a/xen/include/xen/preempt.h b/xen/include/xen/preempt.h
index bef83135a1..e217900d6e 100644
--- a/xen/include/xen/preempt.h
+++ b/xen/include/xen/preempt.h
@@ -9,21 +9,20 @@
 #ifndef __XEN_PREEMPT_H__
 #define __XEN_PREEMPT_H__
 
+#include <asm/atomic.h>
 #include <xen/types.h>
 #include <xen/percpu.h>
 
-DECLARE_PER_CPU(unsigned int, __preempt_count);
+DECLARE_PER_CPU(atomic_t, __preempt_count);
 
 #define preempt_count() (this_cpu(__preempt_count))
 
 #define preempt_disable() do {                  \
-    preempt_count()++;                          \
-    barrier();                                  \
+    atomic_inc(&preempt_count());               \
 } while (0)
 
 #define preempt_enable() do {                   \
-    barrier();                                  \
-    preempt_count()--;                          \
+    atomic_dec(&preempt_count());               \
 } while (0)
 
 bool_t in_atomic(void);
-- 
2.29.2