---
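(Note, not meant for the commit message: the conversion relies on the
fixed relationship between the _VPF_* bit numbers and the VPF_* masks
in xen/include/xen/sched.h, which reads along the lines of

    /* VCPU is offline. */
    #define _VPF_down            1
    #define VPF_down             (1UL<<_VPF_down)

For the read-only checks converted here,

    if ( v->pause_flags & VPF_down )

is therefore equivalent to

    if ( test_bit(_VPF_down, &v->pause_flags) )

while avoiding the pointer-based bitop accessors. Writer sides are left
untouched and keep using set_bit()/clear_bit() on the _VPF_* bit
numbers, as the set_bit(_VPF_migrating, ...) still visible in the
vcpu_force_reschedule() hunk below shows.)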
 xen/arch/x86/domctl.c  |  2 +-
 xen/arch/x86/hvm/hvm.c |  4 ++--
 xen/arch/x86/hvm/vpt.c |  2 +-
 xen/common/domain.c    |  4 ++--
 xen/common/domctl.c    |  8 ++++----
 xen/common/schedule.c  | 16 ++++++++--------
 6 files changed, 18 insertions(+), 18 deletions(-)
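Two hunks are not purely mechanical: in xen/common/domctl.c the
getvcpuinfo.blocked assignment gains a !!, since the mask expression
yields the raw masked value rather than test_bit()'s 0/1 result and
hence needs normalising before being stored in the narrower field; and
in xen/common/schedule.c the single unlikely(test_bit() && test_bit())
gets split into two unlikely() annotations, one per operand, with the
test_bit() on poll_mask itself left in place.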
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 6172c0d..f8a559c 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1213,7 +1213,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
c(flags = v->arch.vgc_flags & ~(VGCF_i387_valid|VGCF_in_kernel));
if ( v->fpu_initialised )
c(flags |= VGCF_i387_valid);
- if ( !test_bit(_VPF_down, &v->pause_flags) )
+ if ( !(v->pause_flags & VPF_down) )
c(flags |= VGCF_online);
if ( !compat )
{
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6afc344..3fa2280 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1728,7 +1728,7 @@ static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
/* We don't need to save state for a vcpu that is down; the restore
* code will leave it down if there is nothing saved. */
- if ( test_bit(_VPF_down, &v->pause_flags) )
+ if ( v->pause_flags & VPF_down )
continue;

/* Architecture-specific vmcs/vmcb bits */
@@ -2512,7 +2512,7 @@ void hvm_vcpu_down(struct vcpu *v)
/* Any other VCPUs online? ... */
domain_lock(d);
for_each_vcpu ( d, v )
- if ( !test_bit(_VPF_down, &v->pause_flags) )
+ if ( !(v->pause_flags & VPF_down) )
online_count++;
domain_unlock(d);

diff --git a/xen/arch/x86/hvm/vpt.c b/xen/arch/x86/hvm/vpt.c
index 0c8b22e..4fa6566 100644
--- a/xen/arch/x86/hvm/vpt.c
+++ b/xen/arch/x86/hvm/vpt.c
@@ -178,7 +178,7 @@ void pt_save_timer(struct vcpu *v)
struct list_head *head = &v->arch.hvm_vcpu.tm_list;
struct periodic_time *pt;

- if ( test_bit(_VPF_blocked, &v->pause_flags) )
+ if ( v->pause_flags & VPF_blocked )
return;

spin_lock(&v->arch.hvm_vcpu.tm_lock);
diff --git a/xen/common/domain.c b/xen/common/domain.c
index cda60a9..7c362eb 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -1135,7 +1135,7 @@ int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset)
return -EINVAL;

/* Run this command on yourself or on other offline VCPUS. */
- if ( (v != current) && !test_bit(_VPF_down, &v->pause_flags) )
+ if ( (v != current) && !(v->pause_flags & VPF_down) )
return -EINVAL;

page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
@@ -1263,7 +1263,7 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
break;

case VCPUOP_is_up:
- rc = !test_bit(_VPF_down, &v->pause_flags);
+ rc = !(v->pause_flags & VPF_down);
break;

case VCPUOP_get_runstate_info:
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 08de32d..46b967e 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -170,7 +170,7 @@ void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
vcpu_runstate_get(v, &runstate);
cpu_time += runstate.time[RUNSTATE_running];
info->max_vcpu_id = v->vcpu_id;
- if ( !test_bit(_VPF_down, &v->pause_flags) )
+ if ( !(v->pause_flags & VPF_down) )
{
if ( !(v->pause_flags & VPF_blocked) )
flags &= ~XEN_DOMINF_blocked;
@@ -231,7 +231,7 @@ static unsigned int default_vcpu0_location(cpumask_t *online)
rcu_read_lock(&domlist_read_lock);
for_each_domain ( d )
for_each_vcpu ( d, v )
- if ( !test_bit(_VPF_down, &v->pause_flags)
+ if ( !(v->pause_flags & VPF_down)
&& ((cpu = v->processor) < nr_cpus) )
cnt[cpu]++;
rcu_read_unlock(&domlist_read_lock);
@@ -944,8 +944,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)

vcpu_runstate_get(v, &runstate);

- op->u.getvcpuinfo.online   = !test_bit(_VPF_down, &v->pause_flags);
- op->u.getvcpuinfo.blocked  = test_bit(_VPF_blocked, &v->pause_flags);
+ op->u.getvcpuinfo.online   = !(v->pause_flags & VPF_down);
+ op->u.getvcpuinfo.blocked  = !!(v->pause_flags & VPF_blocked);
op->u.getvcpuinfo.running  = v->is_running;
op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
op->u.getvcpuinfo.cpu      = v->processor;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 5ffa1a1..c5f640f 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -119,7 +119,7 @@ static inline void vcpu_urgent_count_update(struct vcpu *v)

if ( unlikely(v->is_urgent) )
{
- if ( !test_bit(_VPF_blocked, &v->pause_flags) ||
+ if ( !(v->pause_flags & VPF_blocked) ||
!test_bit(v->vcpu_id, v->domain->poll_mask) )
{
v->is_urgent = 0;
@@ -128,8 +128,8 @@ static inline void vcpu_urgent_count_update(struct vcpu *v)
}
else
{
- if ( unlikely(test_bit(_VPF_blocked, &v->pause_flags) &&
- test_bit(v->vcpu_id, v->domain->poll_mask)) )
+ if ( unlikely(v->pause_flags & VPF_blocked) &&
+ unlikely(test_bit(v->vcpu_id, v->domain->poll_mask)) )
{
v->is_urgent = 1;
atomic_inc(&per_cpu(schedule_data,v->processor).urgent_count);
@@ -418,7 +418,7 @@ void vcpu_wake(struct vcpu *v)
vcpu_runstate_change(v, RUNSTATE_runnable, NOW());
SCHED_OP(VCPU2OP(v), wake, v);
}
- else if ( !test_bit(_VPF_blocked, &v->pause_flags) )
+ else if ( !(v->pause_flags & VPF_blocked) )
{
if ( v->runstate.state == RUNSTATE_blocked )
vcpu_runstate_change(v, RUNSTATE_offline, NOW());
@@ -595,7 +595,7 @@ void vcpu_force_reschedule(struct vcpu *v)
set_bit(_VPF_migrating, &v->pause_flags);
vcpu_schedule_unlock_irq(lock, v);

- if ( test_bit(_VPF_migrating, &v->pause_flags) )
+ if ( v->pause_flags & VPF_migrating )
{
vcpu_sleep_nosync(v);
vcpu_migrate(v);
@@ -763,7 +763,7 @@ static int vcpu_set_affinity(

domain_update_node_affinity(v->domain);

- if ( test_bit(_VPF_migrating, &v->pause_flags) )
+ if ( v->pause_flags & VPF_migrating )
{
vcpu_sleep_nosync(v);
vcpu_migrate(v);
@@ -1285,7 +1285,7 @@ static void schedule(void)

vcpu_runstate_change(
prev,
- (test_bit(_VPF_blocked, &prev->pause_flags) ? RUNSTATE_blocked :
+ ((prev->pause_flags & VPF_blocked) ? RUNSTATE_blocked :
(vcpu_runnable(prev) ? RUNSTATE_runnable : RUNSTATE_offline)),
now);
prev->last_run_time = now;
@@ -1327,7 +1327,7 @@ void context_saved(struct vcpu *prev)

SCHED_OP(VCPU2OP(prev), context_saved, prev);

- if ( unlikely(test_bit(_VPF_migrating, &prev->pause_flags)) )
+ if ( unlikely(prev->pause_flags & VPF_migrating) )
vcpu_migrate(prev);
}