On Fri, Oct 03, 2014 at 06:47:43PM +0100, Dr. David Alan Gilbert (git) wrote:
> From: "Dr. David Alan Gilbert" <dgilb...@redhat.com>
>
> When transmitting RAM pages, consume pages that have been queued by
> MIG_RPCOMM_REQPAGE commands and send them ahead of normal page scanning.
>
> Note:
>   a) After a queued page the linear walk carries on from after the
>      unqueued page; there is a reasonable chance that the destination
>      was about to ask for other closeby pages anyway.
>
>   b) We have to be careful of any assumptions that the page walking
>      code makes, in particular it does some short cuts on its first linear
>      walk that break as soon as we do a queued page.
>
> Signed-off-by: Dr. David Alan Gilbert <dgilb...@redhat.com>
> ---
>  arch_init.c | 149 ++++++++++++++++++++++++++++++++++++++++++++++++++----------
>  1 file changed, 125 insertions(+), 24 deletions(-)
>
> diff --git a/arch_init.c b/arch_init.c
> index 72f9e17..a945990 100644
> --- a/arch_init.c
> +++ b/arch_init.c
> @@ -331,6 +331,7 @@ static RAMBlock *last_seen_block;
>  /* This is the last block from where we have sent data */
>  static RAMBlock *last_sent_block;
>  static ram_addr_t last_offset;
> +static bool last_was_from_queue;
>  static unsigned long *migration_bitmap;
>  static uint64_t migration_dirty_pages;
>  static uint32_t last_version;
> @@ -460,6 +461,19 @@ static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
>      return ret;
>  }
>
> +static inline bool migration_bitmap_clear_dirty(ram_addr_t addr)
> +{
> +    bool ret;
> +    int nr = addr >> TARGET_PAGE_BITS;
> +
> +    ret = test_and_clear_bit(nr, migration_bitmap);
> +
> +    if (ret) {
> +        migration_dirty_pages--;
> +    }
> +    return ret;
> +}
> +
>  static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
>  {
>      ram_addr_t addr;
> @@ -660,6 +674,39 @@ static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
>  }
>
>  /*
> + * Unqueue a page from the queue fed by postcopy page requests
> + *
> + * Returns: The RAMBlock* to transmit from (or NULL if the queue is empty)
> + *   ms: MigrationState in
> + *   offset: the byte offset within the RAMBlock for the start of the page
> + *   bitoffset: global offset in the dirty/sent bitmaps
> + */
> +static RAMBlock *ram_save_unqueue_page(MigrationState *ms, ram_addr_t *offset,
> +                                       unsigned long *bitoffset)
> +{
> +    RAMBlock *result = NULL;
> +    qemu_mutex_lock(&ms->src_page_req_mutex);
> +    if (!QSIMPLEQ_EMPTY(&ms->src_page_requests)) {
> +        struct MigrationSrcPageRequest *entry =
> +                                QSIMPLEQ_FIRST(&ms->src_page_requests);
> +        result = entry->rb;
> +        *offset = entry->offset;
> +        *bitoffset = (entry->offset + entry->rb->offset) >> TARGET_PAGE_BITS;
> +
> +        if (entry->len > TARGET_PAGE_SIZE) {
> +            entry->len -= TARGET_PAGE_SIZE;
> +            entry->offset += TARGET_PAGE_SIZE;
> +        } else {
> +            QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
> +            g_free(entry);
> +        }
> +    }
> +    qemu_mutex_unlock(&ms->src_page_req_mutex);
> +
> +    return result;
> +}
> +
> +/*
>   * Queue the pages for transmission, e.g. a request from postcopy destination
>   * ms: MigrationStatus in which the queue is held
>   * rbname: The RAMBlock the request is for - may be NULL (to mean reuse last)
> @@ -720,44 +767,97 @@ int ram_save_queue_pages(MigrationState *ms, const char *rbname,
>
>  static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
>  {
> +    MigrationState *ms = migrate_get_current();
>      RAMBlock *block = last_seen_block;
> +    RAMBlock *tmpblock;
>      ram_addr_t offset = last_offset;
> +    ram_addr_t tmpoffset;
>      bool complete_round = false;
>      int bytes_sent = 0;
> -    MemoryRegion *mr;
>      unsigned long bitoffset;
> +    unsigned long hps = sysconf(_SC_PAGESIZE);
>
> -    if (!block)
> +    if (!block) {
>          block = QTAILQ_FIRST(&ram_list.blocks);
> +        last_was_from_queue = false;
> +    }
>
> -    while (true) {
> -        mr = block->mr;
> -        offset = migration_bitmap_find_and_reset_dirty(mr, offset, &bitoffset);
> -        if (complete_round && block == last_seen_block &&
> -            offset >= last_offset) {
> -            break;
> +    while (true) { /* Until we send a block or run out of stuff to send */
> +        tmpblock = NULL;
> +
> +        /*
> +         * Don't break host-page chunks up with queue items
Why does this matter?

> +         * so only unqueue if,
> +         * a) The last item came from the queue anyway
> +         * b) The last sent item was the last target-page in a host page
> +         */
> +        if (last_was_from_queue || (!last_sent_block) ||
> +            ((last_offset & (hps - 1)) == (hps - TARGET_PAGE_SIZE))) {
> +            tmpblock = ram_save_unqueue_page(ms, &tmpoffset, &bitoffset);
> +        }
> -        if (offset >= block->length) {
> -            offset = 0;
> -            block = QTAILQ_NEXT(block, next);
> -            if (!block) {
> -                block = QTAILQ_FIRST(&ram_list.blocks);
> -                complete_round = true;
> -                ram_bulk_stage = false;
> +
> +        if (tmpblock) {
> +            /* We've got a block from the postcopy queue */
> +            DPRINTF("%s: Got postcopy item '%s' offset=%zx bitoffset=%zx",
> +                    __func__, tmpblock->idstr, tmpoffset, bitoffset);
> +            /* We're sending this page, and since it's postcopy nothing else
> +             * will dirty it, and we must make sure it doesn't get sent again.
> +             */
> +            if (!migration_bitmap_clear_dirty(bitoffset << TARGET_PAGE_BITS)) {

Ugh.. that's kind of subtle.  I think it would be clearer if you work in
terms of a ram_addr_t throughout, rather than "bitoffset" whose meaning
is not terribly obvious.

-- 
David Gibson                    | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au  | minimalist, thank you.  NOT _the_ _other_
                                | _way_ _around_!
http://www.ozlabs.org/~dgibson
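
For illustration, a minimal stand-alone sketch of the kind of ram_addr_t-based
interface suggested above. The PageRequest and unqueue_page names and the
simplified structs are stand-ins invented for this sketch, not the real QEMU
types or APIs: the helper hands back a global ram_addr_t, and a bitmap bit
number is only derived at the single point where the bitmap is touched.

/* Illustration only: simplified stand-in types, not the real QEMU structures. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12

typedef uint64_t ram_addr_t;

typedef struct RAMBlock {
    ram_addr_t offset;          /* block's base in the global ram_addr_t space */
} RAMBlock;

typedef struct PageRequest {
    RAMBlock *rb;
    ram_addr_t offset;          /* byte offset within rb */
    struct PageRequest *next;
} PageRequest;

/*
 * Pop one requested page.  Returns the RAMBlock (or NULL if the queue is
 * empty) and fills *ram_addr with the page's global ram_addr_t, instead of
 * handing the caller a bitmap "bitoffset".
 */
static RAMBlock *unqueue_page(PageRequest **queue, ram_addr_t *ram_addr)
{
    PageRequest *entry = *queue;

    if (!entry) {
        return NULL;
    }
    *queue = entry->next;       /* real code would free or shorten the entry */

    *ram_addr = entry->rb->offset + entry->offset;
    return entry->rb;
}

int main(void)
{
    RAMBlock blk = { .offset = 0x40000000 };
    PageRequest req = { .rb = &blk, .offset = 0x3000, .next = NULL };
    PageRequest *queue = &req;
    ram_addr_t addr;

    if (unqueue_page(&queue, &addr)) {
        /* Only here, at the bitmap access, does a bit number appear. */
        unsigned long bit = addr >> TARGET_PAGE_BITS;
        printf("send ram_addr=0x%" PRIx64 " (bitmap bit %lu)\n", addr, bit);
    }
    return 0;
}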
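
Also for illustration, a tiny worked example of the host-page boundary test in
the quoted hunk, (last_offset & (hps - 1)) == (hps - TARGET_PAGE_SIZE). The
16KiB host page and 4KiB target page sizes below are assumed purely for the
example; in the patch the real values come from sysconf(_SC_PAGESIZE) and
TARGET_PAGE_SIZE. Per the quoted comment, unqueueing is only allowed right
after a queued page or at this boundary, so a host page's run of target pages
is not split.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical sizes for illustration: one 16KiB host page = four 4KiB
 * target pages. */
#define HOST_PAGE_SIZE   16384UL
#define TARGET_PAGE_SIZE 4096UL

/* True only for the last target page inside its host page. */
static bool last_target_page_in_host_page(unsigned long offset)
{
    return (offset & (HOST_PAGE_SIZE - 1)) == (HOST_PAGE_SIZE - TARGET_PAGE_SIZE);
}

int main(void)
{
    unsigned long offsets[] = { 0x0000, 0x1000, 0x2000, 0x3000, 0x4000 };

    for (size_t i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
        printf("offset 0x%04lx -> %s\n", offsets[i],
               last_target_page_in_host_page(offsets[i])
                   ? "last target page in its host page"
                   : "mid host page");
    }
    return 0;
}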