Since KVM dirty page reporting works on guest physical addresses, we need to clear all of the aliases when a page is migrated; otherwise we risk losing writes to the aliases that were not cleared.
Note that this is only an issue for manual clearing of the bitmap; if
the bitmap is cleared at the same time as it's retrieved, all the
aliases get cleared correctly.

Reported-by: Dr. David Alan Gilbert <dgilb...@redhat.com>
Fixes: ff4aa11419242c835b03d274f08f797c129ed7ba
Cc: qemu-sta...@nongnu.org
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 accel/kvm/kvm-all.c | 36 ++++++++++++++++++++++--------------
 1 file changed, 22 insertions(+), 14 deletions(-)

diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index e9e6086..315a915 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -588,8 +588,8 @@ static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start, uint6
      * satisfy the KVM interface requirement. Firstly, do the start
      * page alignment on 64 host pages
      */
-    bmap_start = (start - mem->start_addr) & KVM_CLEAR_LOG_MASK;
-    start_delta = start - mem->start_addr - bmap_start;
+    bmap_start = start & KVM_CLEAR_LOG_MASK;
+    start_delta = start - bmap_start;
     bmap_start /= psize;
 
     /*
@@ -693,8 +693,8 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
                                   MemoryRegionSection *section)
 {
     KVMState *s = kvm_state;
-    uint64_t start, size;
-    KVMSlot *mem = NULL;
+    uint64_t start, size, offset, count;
+    KVMSlot *mem;
     int ret, i;
 
     if (!s->manual_dirty_log_protect) {
@@ -712,22 +712,30 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
 
     kvm_slots_lock(kml);
 
-    /* Find any possible slot that covers the section */
     for (i = 0; i < s->nr_slots; i++) {
         mem = &kml->slots[i];
-        if (mem->start_addr <= start &&
-            start + size <= mem->start_addr + mem->memory_size) {
+        /* Discard slots that are empty or do not overlap the section */
+        if (!mem->memory_size ||
+            mem->start_addr > start + size - 1 ||
+            start > mem->start_addr + mem->memory_size - 1) {
+            continue;
+        }
+
+        if (start >= mem->start_addr) {
+            /* The slot starts before section or is aligned to it. */
+            offset = start - mem->start_addr;
+            count = MIN(mem->memory_size - offset, size);
+        } else {
+            /* The slot starts after section. */
+            offset = 0;
+            count = MIN(mem->memory_size, size - (mem->start_addr - start));
+        }
+        ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
+        if (ret < 0) {
             break;
         }
     }
 
-    /*
-     * We should always find one memslot until this point, otherwise
-     * there could be something wrong from the upper layer
-     */
-    assert(mem && i != s->nr_slots);
-    ret = kvm_log_clear_one_slot(mem, kml->as_id, start, size);
-
     kvm_slots_unlock(kml);
 
     return ret;
-- 
1.8.3.1
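
As a side note for anyone studying the new loop: below is a minimal,
self-contained sketch of the per-slot clamping it performs. Slot,
clamp_to_slot() and the addresses in main() are made up for
illustration (not QEMU code); only the body of clamp_to_slot() mirrors
the offset/count arithmetic added above.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Hypothetical stand-in for the two KVMSlot fields used here. */
typedef struct {
    uint64_t start_addr;
    uint64_t memory_size;
} Slot;

/*
 * Clamp the section [start, start + size) against one slot, mirroring
 * the computation the patch adds to kvm_physical_log_clear().  Returns
 * 0 and fills *offset/*count when the slot overlaps the section, -1
 * when it does not.
 */
static int clamp_to_slot(const Slot *mem, uint64_t start, uint64_t size,
                         uint64_t *offset, uint64_t *count)
{
    /* Discard slots that are empty or do not overlap the section. */
    if (!mem->memory_size ||
        mem->start_addr > start + size - 1 ||
        start > mem->start_addr + mem->memory_size - 1) {
        return -1;
    }

    if (start >= mem->start_addr) {
        /* The slot starts before the section or is aligned to it. */
        *offset = start - mem->start_addr;
        *count = MIN(mem->memory_size - *offset, size);
    } else {
        /* The slot starts after the section. */
        *offset = 0;
        *count = MIN(mem->memory_size, size - (mem->start_addr - start));
    }
    return 0;
}

int main(void)
{
    /* A section that spans two adjacent 16 KiB slots. */
    Slot slots[] = {
        { .start_addr = 0x10000, .memory_size = 0x4000 },
        { .start_addr = 0x14000, .memory_size = 0x4000 },
    };
    uint64_t start = 0x11000, size = 0x4000, offset, count;
    unsigned i;

    for (i = 0; i < 2; i++) {
        if (clamp_to_slot(&slots[i], start, size, &offset, &count) == 0) {
            printf("slot %u: offset=0x%" PRIx64 " count=0x%" PRIx64 "\n",
                   i, offset, count);
        }
    }
    /* Prints offset=0x1000 count=0x3000 for slot 0 and
     * offset=0x0 count=0x1000 for slot 1, exercising both branches. */
    return 0;
}

The point of the -1 adjustments in the overlap test is to compare last
bytes rather than one-past-the-end addresses, which avoids overflow
when a slot or section ends at the top of the address space.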