Originally there was only one counter, nr_pages, which was erroneously
used to track both how many pages were mapped and how many pages still
needed to be sent. Because the counter was decremented as each page was
sent, by the time munmap(2) ran it had reached 0, so munmap(2) was
always called with 0 as the length argument, which leaked the mapping.
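
A minimal, self-contained sketch of the broken pattern (hypothetical
code using plain mmap(2)/munmap(2), not the actual libxc logic):

    #include <assert.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page_size = sysconf(_SC_PAGESIZE);
        unsigned nr_pages = 4;

        void *mapping = mmap(NULL, nr_pages * page_size,
                             PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        assert(mapping != MAP_FAILED);

        /* "Send" the pages, decrementing the same counter that sized
         * the mapping. */
        while ( nr_pages > 0 )
            --nr_pages;

        /* nr_pages is now 0, so on Linux this call fails with EINVAL
         * and all four pages stay mapped: the leak this patch fixes. */
        munmap(mapping, nr_pages * page_size);
        return 0;
    }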

This problem was discovered on a 32-bit toolstack because a 32-bit
application has a notably smaller address space, but the bug affects
64-bit toolstacks as well.

Solve this by using a separate counter, nr_pages_to_send, to track how
many pages still need to be sent, leaving nr_pages to record how many
pages are mapped.

Signed-off-by: Wei Liu <wei.l...@citrix.com>
---
Cc: Andrew Cooper <andrew.coop...@citrix.com>
Cc: Ian Campbell <ian.campb...@citrix.com>
Cc: Ian Jackson <ian.jack...@eu.citrix.com>
---
 tools/libxc/xc_sr_save.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/tools/libxc/xc_sr_save.c b/tools/libxc/xc_sr_save.c
index d63b783..bbcfeb6 100644
--- a/tools/libxc/xc_sr_save.c
+++ b/tools/libxc/xc_sr_save.c
@@ -84,7 +84,7 @@ static int write_batch(struct xc_sr_context *ctx)
     void **guest_data = NULL;
     void **local_pages = NULL;
     int *errors = NULL, rc = -1;
-    unsigned i, p, nr_pages = 0;
+    unsigned i, p, nr_pages = 0, nr_pages_to_send;
     unsigned nr_pfns = ctx->save.nr_batch_pfns;
     void *page, *orig_page;
     uint64_t *rec_pfns = NULL;
@@ -151,10 +151,12 @@ static int write_batch(struct xc_sr_context *ctx)
         mfns[nr_pages++] = mfns[i];
     }
 
-    if ( nr_pages > 0 )
+    nr_pages_to_send = nr_pages;
+
+    if ( nr_pages_to_send > 0 )
     {
         guest_mapping = xc_map_foreign_bulk(
-            xch, ctx->domid, PROT_READ, mfns, errors, nr_pages);
+            xch, ctx->domid, PROT_READ, mfns, errors, nr_pages_to_send);
         if ( !guest_mapping )
         {
             PERROR("Failed to map guest pages");
@@ -191,7 +193,7 @@ static int write_batch(struct xc_sr_context *ctx)
                     set_bit(ctx->save.batch_pfns[i], ctx->save.deferred_pages);
                     ++ctx->save.nr_deferred_pages;
                     types[i] = XEN_DOMCTL_PFINFO_XTAB;
-                    --nr_pages;
+                    --nr_pages_to_send;
                 }
                 else
                     goto err;
@@ -216,7 +218,7 @@ static int write_batch(struct xc_sr_context *ctx)
 
     rec.length = sizeof(hdr);
     rec.length += nr_pfns * sizeof(*rec_pfns);
-    rec.length += nr_pages * PAGE_SIZE;
+    rec.length += nr_pages_to_send * PAGE_SIZE;
 
     for ( i = 0; i < nr_pfns; ++i )
         rec_pfns[i] = ((uint64_t)(types[i]) << 32) | ctx->save.batch_pfns[i];
@@ -235,7 +237,7 @@ static int write_batch(struct xc_sr_context *ctx)
 
     iovcnt = 4;
 
-    if ( nr_pages )
+    if ( nr_pages_to_send )
     {
         for ( i = 0; i < nr_pfns; ++i )
         {
@@ -244,7 +246,7 @@ static int write_batch(struct xc_sr_context *ctx)
                 iov[iovcnt].iov_base = guest_data[i];
                 iov[iovcnt].iov_len = PAGE_SIZE;
                 iovcnt++;
-                --nr_pages;
+                --nr_pages_to_send;
             }
         }
     }
@@ -256,7 +258,7 @@ static int write_batch(struct xc_sr_context *ctx)
     }
 
     /* Sanity check we have sent all the pages we expected to. */
-    assert(nr_pages == 0);
+    assert(nr_pages_to_send == 0);
     rc = ctx->save.nr_batch_pfns = 0;
 
  err:
-- 
2.1.4

