From: "Dr. David Alan Gilbert" <dgilb...@redhat.com> postcopy_place_page (etc) provide a way for postcopy to place a page into guests memory atomically (using the copy ioctl on the ufd).
Signed-off-by: Dr. David Alan Gilbert <dgilb...@redhat.com>
Reviewed-by: Amit Shah <amit.s...@redhat.com>
Reviewed-by: Juan Quintela <quint...@redhat.com>
---
 include/migration/migration.h    |  1 +
 include/migration/postcopy-ram.h | 21 +++++++++
 migration/postcopy-ram.c         | 97 ++++++++++++++++++++++++++++++++++++++++
 trace-events                     |  2 +
 4 files changed, 121 insertions(+)

diff --git a/include/migration/migration.h b/include/migration/migration.h
index 1491bf3..a48471e 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -96,6 +96,7 @@ struct MigrationIncomingState {
     int            userfault_fd;
     QEMUFile *to_src_file;
     QemuMutex      rp_mutex;    /* We send replies from multiple threads */
+    void          *postcopy_tmp_page;
 
     /* See savevm.c */
     LoadStateEntry_Head loadvm_handlers;
diff --git a/include/migration/postcopy-ram.h b/include/migration/postcopy-ram.h
index b10c03d..d7c292f 100644
--- a/include/migration/postcopy-ram.h
+++ b/include/migration/postcopy-ram.h
@@ -69,4 +69,25 @@ void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
 void postcopy_discard_send_finish(MigrationState *ms,
                                   PostcopyDiscardState *pds);
 
+/*
+ * Place a page (from) at (host) efficiently
+ *    There are restrictions on how 'from' must be mapped, in general best
+ *    to use other postcopy_ routines to allocate.
+ * returns 0 on success
+ */
+int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from);
+
+/*
+ * Place a zero page at (host) atomically
+ * returns 0 on success
+ */
+int postcopy_place_page_zero(MigrationIncomingState *mis, void *host);
+
+/*
+ * Allocate a page of memory that can be mapped at a later point in time
+ * using postcopy_place_page
+ * Returns: Pointer to allocated page
+ */
+void *postcopy_get_tmp_page(MigrationIncomingState *mis);
+
 #endif
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index a7d6c9f..9627093 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -272,6 +272,10 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
         return -1;
     }
 
+    if (mis->postcopy_tmp_page) {
+        munmap(mis->postcopy_tmp_page, getpagesize());
+        mis->postcopy_tmp_page = NULL;
+    }
     return 0;
 }
 
@@ -338,6 +342,83 @@ int postcopy_ram_enable_notify(MigrationIncomingState *mis)
     return 0;
 }
 
+/*
+ * Place a host page (from) at (host) atomically
+ * returns 0 on success
+ */
+int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from)
+{
+    struct uffdio_copy copy_struct;
+
+    copy_struct.dst = (uint64_t)(uintptr_t)host;
+    copy_struct.src = (uint64_t)(uintptr_t)from;
+    copy_struct.len = getpagesize();
+    copy_struct.mode = 0;
+
+    /* copy also acks to the kernel waking the stalled thread up
+     * TODO: We can inhibit that ack and only do it if it was requested
+     * which would be slightly cheaper, but we'd have to be careful
+     * of the order of updating our page state.
+     */
+    if (ioctl(mis->userfault_fd, UFFDIO_COPY, &copy_struct)) {
+        int e = errno;
+        error_report("%s: %s copy host: %p from: %p",
+                     __func__, strerror(e), host, from);
+
+        return -e;
+    }
+
+    trace_postcopy_place_page(host);
+    return 0;
+}
+
+/*
+ * Place a zero page at (host) atomically
+ * returns 0 on success
+ */
+int postcopy_place_page_zero(MigrationIncomingState *mis, void *host)
+{
+    struct uffdio_zeropage zero_struct;
+
+    zero_struct.range.start = (uint64_t)(uintptr_t)host;
+    zero_struct.range.len = getpagesize();
+    zero_struct.mode = 0;
+
+    if (ioctl(mis->userfault_fd, UFFDIO_ZEROPAGE, &zero_struct)) {
+        int e = errno;
+        error_report("%s: %s zero host: %p",
+                     __func__, strerror(e), host);
+
+        return -e;
+    }
+
+    trace_postcopy_place_page_zero(host);
+    return 0;
+}
+
+/*
+ * Returns a target page of memory that can be mapped at a later point in time
+ * using postcopy_place_page
+ * The same address is used repeatedly, postcopy_place_page just takes the
+ * backing page away.
+ * Returns: Pointer to allocated page
+ */
+void *postcopy_get_tmp_page(MigrationIncomingState *mis)
+{
+    if (!mis->postcopy_tmp_page) {
+        mis->postcopy_tmp_page = mmap(NULL, getpagesize(),
+                                      PROT_READ | PROT_WRITE, MAP_PRIVATE |
+                                      MAP_ANONYMOUS, -1, 0);
+        /* mmap() reports failure as MAP_FAILED, not NULL */
+        if (mis->postcopy_tmp_page == MAP_FAILED) {
+            mis->postcopy_tmp_page = NULL;
+            error_report("%s: %s", __func__, strerror(errno));
+            return NULL;
+        }
+    }
+
+    return mis->postcopy_tmp_page;
+}
+
 #else
 /* No target OS support, stubs just fail */
 bool postcopy_ram_supported_by_host(void)
@@ -367,6 +448,22 @@ int postcopy_ram_enable_notify(MigrationIncomingState *mis)
 {
     assert(0);
 }
+
+int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from)
+{
+    assert(0);
+}
+
+int postcopy_place_page_zero(MigrationIncomingState *mis, void *host)
+{
+    assert(0);
+}
+
+void *postcopy_get_tmp_page(MigrationIncomingState *mis)
+{
+    assert(0);
+}
+
 #endif
 
 /* ------------------------------------------------------------------------- */
diff --git a/trace-events b/trace-events
index 3df3656..c493f5d 100644
--- a/trace-events
+++ b/trace-events
@@ -1551,6 +1551,8 @@ postcopy_discard_send_range(const char *ramblock, unsigned long start, unsigned
 postcopy_ram_discard_range(void *start, size_t length) "%p,+%zx"
 postcopy_cleanup_range(const char *ramblock, void *host_addr, size_t offset, size_t length) "%s: %p offset=%zx length=%zx"
 postcopy_init_range(const char *ramblock, void *host_addr, size_t offset, size_t length) "%s: %p offset=%zx length=%zx"
+postcopy_place_page(void *host_addr) "host=%p"
+postcopy_place_page_zero(void *host_addr) "host=%p"
 
 # kvm-all.c
 kvm_ioctl(int type, void *arg) "type 0x%x, arg %p"
-- 
2.5.0