Now, with the mutex protection in place, the atomic operations are not
needed anymore.

Signed-off-by: Peter Xu <pet...@redhat.com>
---
 migration/postcopy-ram.c | 23 ++++++++++-------------
 1 file changed, 10 insertions(+), 13 deletions(-)
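[Note, not for the commit log] A minimal, self-contained sketch of the
pattern the change relies on, assuming every reader and writer of the
blocktime state takes the same lock (the struct, field and function names
below are illustrative, not the actual QEMU ones): once all accesses are
serialized by one mutex, plain loads and stores are sufficient and the
qatomic_* helpers become redundant.

#include <pthread.h>
#include <stdint.h>

/* Illustrative stand-in for the blocktime context; not the QEMU struct. */
typedef struct {
    pthread_mutex_t lock;       /* serializes every access below */
    uint64_t last_begin;
    unsigned int smp_cpus_down;
} BlocktimeSketch;

/* All updates happen with the lock held, so non-atomic accesses are safe:
 * the mutex provides both mutual exclusion and the required ordering. */
static void fault_begin(BlocktimeSketch *bt, uint64_t now)
{
    pthread_mutex_lock(&bt->lock);
    bt->smp_cpus_down++;        /* previously an atomic increment */
    bt->last_begin = now;       /* previously an atomic exchange */
    pthread_mutex_unlock(&bt->lock);
}

static void fault_end(BlocktimeSketch *bt, unsigned int affected)
{
    pthread_mutex_lock(&bt->lock);
    bt->smp_cpus_down -= affected;  /* previously an atomic subtraction */
    pthread_mutex_unlock(&bt->lock);
}

The same reasoning covers the reads in mark_postcopy_blocktime_end(): under
the lock they cannot race with the writes, so the qatomic_fetch_add(..., 0)
style reads are no longer needed either.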
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index 9db3eefec4..0b4efdb7fe 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -849,12 +849,12 @@ void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
 
     low_time_offset = get_low_time_offset(dc);
     if (dc->vcpu_addr[cpu] == 0) {
-        qatomic_inc(&dc->smp_cpus_down);
+        dc->smp_cpus_down++;
     }
 
-    qatomic_xchg(&dc->last_begin, low_time_offset);
-    qatomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
-    qatomic_xchg(&dc->vcpu_addr[cpu], addr);
+    dc->last_begin = low_time_offset;
+    dc->page_fault_vcpu_time[cpu] = low_time_offset;
+    dc->vcpu_addr[cpu] = addr;
 
     /*
      * The caller should only inject a blocktime entry when the page is
@@ -915,29 +915,26 @@ static void mark_postcopy_blocktime_end(uintptr_t addr)
     for (i = 0; i < smp_cpus; i++) {
         uint32_t vcpu_blocktime = 0;
 
-        read_vcpu_time = qatomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
-        if (qatomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
-            read_vcpu_time == 0) {
+        read_vcpu_time = dc->page_fault_vcpu_time[i];
+        if (dc->vcpu_addr[i] != addr || read_vcpu_time == 0) {
             continue;
         }
-        qatomic_xchg(&dc->vcpu_addr[i], 0);
+        dc->vcpu_addr[i] = 0;
         vcpu_blocktime = low_time_offset - read_vcpu_time;
         affected_cpu += 1;
         /* we need to know is that mark_postcopy_end was due to
         * faulted page, another possible case it's prefetched
         * page and in that case we shouldn't be here */
-        if (!vcpu_total_blocktime &&
-            qatomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
+        if (!vcpu_total_blocktime && dc->smp_cpus_down == smp_cpus) {
             vcpu_total_blocktime = true;
         }
         /* continue cycle, due to one page could affect several vCPUs */
         dc->vcpu_blocktime[i] += vcpu_blocktime;
     }
 
-    qatomic_sub(&dc->smp_cpus_down, affected_cpu);
+    dc->smp_cpus_down -= affected_cpu;
     if (vcpu_total_blocktime) {
-        dc->total_blocktime += low_time_offset - qatomic_fetch_add(
-            &dc->last_begin, 0);
+        dc->total_blocktime += low_time_offset - dc->last_begin;
     }
     trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
                                       affected_cpu);
-- 
2.49.0