Use set_bit_atomic() and bitmap_set_atomic() so that multiple threads can dirty memory without race conditions.
Signed-off-by: Stefan Hajnoczi <stefa...@redhat.com>
---
I had to get creative to stay under 80 characters per line. I'm open to
suggestions if you prefer me to format it another way.
---
 include/exec/ram_addr.h | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 8fc75cd..ba90daa 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -93,30 +93,32 @@ static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                       unsigned client)
 {
     assert(client < DIRTY_MEMORY_NUM);
-    set_bit(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
+    set_bit_atomic(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
 }
 
 static inline void cpu_physical_memory_set_dirty_range_nocode(ram_addr_t start,
                                                               ram_addr_t length)
 {
     unsigned long end, page;
+    unsigned long **dirty_memory = ram_list.dirty_memory;
 
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
-    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
-    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
+    bitmap_set_atomic(dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
+    bitmap_set_atomic(dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
 }
 
 static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                        ram_addr_t length)
 {
     unsigned long end, page;
+    unsigned long **dirty_memory = ram_list.dirty_memory;
 
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
-    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
-    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
-    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_CODE], page, end - page);
+    bitmap_set_atomic(dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
+    bitmap_set_atomic(dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
+    bitmap_set_atomic(dirty_memory[DIRTY_MEMORY_CODE], page, end - page);
 
     xen_modified_memory(start, length);
 }
 
@@ -142,10 +144,11 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
         for (k = 0; k < nr; k++) {
             if (bitmap[k]) {
                 unsigned long temp = leul_to_cpu(bitmap[k]);
+                unsigned long **d = ram_list.dirty_memory;
 
-                ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION][page + k] |= temp;
-                ram_list.dirty_memory[DIRTY_MEMORY_VGA][page + k] |= temp;
-                ram_list.dirty_memory[DIRTY_MEMORY_CODE][page + k] |= temp;
+                atomic_or(&d[DIRTY_MEMORY_MIGRATION][page + k], temp);
+                atomic_or(&d[DIRTY_MEMORY_VGA][page + k], temp);
+                atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
             }
         }
         xen_modified_memory(start, pages);
-- 
2.1.0