[Xen-devel] [PATCH 4/4] dmop: Add xendevicemodel_modified_memory_bulk()

2017-04-20 Thread jennifer.herbert
From: Jennifer Herbert 

This new devicemodel library call allows multiple extents of pages to be
marked as modified in a single call.  This is something needed for a
use case I'm working on.

The Xen side of the modified_memory call has been modified to accept
an array of extents.  The devicemodel library either provides a
single-element array, to support the original library function, or a
second function allows callers to provide their own array.
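
For illustration only (not part of the patch; the dmod handle, domid and
pfn values below are assumptions), a caller could mark two discontiguous
ranges dirty in a single call:

    struct xen_dm_op_modified_memory_extent extents[2] = {
        { .first_pfn = 0x1000, .nr = 16 },  /* pfns 0x1000 - 0x100f */
        { .first_pfn = 0x8000, .nr = 4 },   /* pfns 0x8000 - 0x8003 */
    };

    /* dmod comes from xendevicemodel_open(); returns 0 on success, -1 on failure. */
    if ( xendevicemodel_modified_memory_bulk(dmod, domid, extents, 2) < 0 )
        perror("xendevicemodel_modified_memory_bulk");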

Signed-off-by: Jennifer Herbert 
--
CC: Jan Beulich 
CC: Paul Durrant 
CC: Andrew Cooper 
CC: Ian Jackson 
CC: Wei Liu 
CC: Julien Grall 
---
 tools/libs/devicemodel/core.c   |  30 --
 tools/libs/devicemodel/include/xendevicemodel.h |  19 +++-
 xen/arch/x86/hvm/dm.c   | 117 
 xen/include/public/hvm/dm_op.h  |  19 +++-
 4 files changed, 134 insertions(+), 51 deletions(-)

diff --git a/tools/libs/devicemodel/core.c b/tools/libs/devicemodel/core.c
index ff09819..d7c6476 100644
--- a/tools/libs/devicemodel/core.c
+++ b/tools/libs/devicemodel/core.c
@@ -459,22 +459,36 @@ int xendevicemodel_track_dirty_vram(
  dirty_bitmap, (size_t)(nr + 7) / 8);
 }
 
-int xendevicemodel_modified_memory(
-xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
-uint32_t nr)
+int xendevicemodel_modified_memory_bulk(
+xendevicemodel_handle *dmod, domid_t domid,
+struct xen_dm_op_modified_memory_extent *extents, uint32_t nr)
 {
 struct xen_dm_op op;
-struct xen_dm_op_modified_memory *data;
+struct xen_dm_op_modified_memory *header;
+size_t extents_size = nr * sizeof(struct xen_dm_op_modified_memory_extent);
 
 memset(&op, 0, sizeof(op));
 
 op.op = XEN_DMOP_modified_memory;
-data = &op.u.modified_memory;
+header = &op.u.modified_memory;
 
-data->first_pfn = first_pfn;
-data->nr = nr;
+header->nr_extents = nr;
+header->opaque = 0;
 
-return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
+return xendevicemodel_op(dmod, domid, 2, &op, sizeof(op),
+ extents, extents_size);
+}
+
+int xendevicemodel_modified_memory(
+xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
+uint32_t nr)
+{
+struct xen_dm_op_modified_memory_extent extent;
+
+extent.first_pfn = first_pfn;
+extent.nr = nr;
+
+return xendevicemodel_modified_memory_bulk(dmod, domid, &extent, 1);
 }
 
 int xendevicemodel_set_mem_type(
diff --git a/tools/libs/devicemodel/include/xendevicemodel.h b/tools/libs/devicemodel/include/xendevicemodel.h
index 1da216f..580fad2 100644
--- a/tools/libs/devicemodel/include/xendevicemodel.h
+++ b/tools/libs/devicemodel/include/xendevicemodel.h
@@ -254,8 +254,8 @@ int xendevicemodel_track_dirty_vram(
 uint32_t nr, unsigned long *dirty_bitmap);
 
 /**
- * This function notifies the hypervisor that a set of domain pages
- * have been modified.
+ * This function notifies the hypervisor that a set of contiguous
+ * domain pages have been modified.
  *
  * @parm dmod a handle to an open devicemodel interface.
  * @parm domid the domain id to be serviced
@@ -268,6 +268,21 @@ int xendevicemodel_modified_memory(
 uint32_t nr);
 
 /**
+ * This function notifies the hypervisor that a set of discontiguous
+ * domain pages have been modified.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm extents an array of extent structs, each of which holds
+ * a first_pfn and nr (number of pfns).
+ * @parm nr the number of extents in the array
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_modified_memory_bulk(
+xendevicemodel_handle *dmod, domid_t domid,
+struct xen_dm_op_modified_memory_extent extents[], uint32_t nr);
+
+/**
  * This function notifies the hypervisor that a set of domain pages
  * are to be treated in a specific way. (See the definition of
  * hvmmem_type_t).
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 6990725..61df3cf 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -155,56 +155,102 @@ static int set_isa_irq_level(struct domain *d, uint8_t isa_irq,
 }
 
 static int modified_memory(struct domain *d,
-   struct xen_dm_op_modified_memory *data)
+   struct dmop_args *bufs,
+   struct xen_dm_op_modified_memory *header)
 {
-xen_pfn_t last_pfn = data->first_pfn + data->nr - 1;
-unsigned int iter = 0;
-int rc = 0;
+#define EXTENTS_BUFFER 1
 
-if ( (data->first_pfn > last_pfn) ||
- (last_pfn > domain_get_maximum_gpfn(d)) )
-return -EINVAL;
+/* Process maximum of 256 pfns before checking for continuation. */
+const unsigned int cont_check_interval = 0x100;
+unsigned int *rem_extents =  &header->nr_extents;
+unsigned int batch_rem_pfns = cont_check_interval;
+/* Used for continuation. */
+unsigned int 

[Xen-devel] [PATCH 2/4] hvm/dmop: Implement copy_{to, from}_guest_buf() in terms of raw accessors

2017-04-20 Thread jennifer.herbert
From: Jennifer Herbert 

This also allows the usual cases to be simplified, by omitting unnecessary
buf parameters, and because the macros can appropriately size the object.

This makes copying to or from a buf that isn't big enough an error.
If the buffer isn't big enough, trying to carry on regardless
can only cause trouble later on.
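
As a condensed sketch of the effect at a call site (condensed from the
dm_op() hunk below), the explicit buffer-array, count and size arguments
collapse into the macro, which sizes the copy from the destination object
and now fails rather than truncating:

    struct xen_dm_op op;

    /* before: caller passes sizes; a short buffer was zero-padded and accepted */
    if ( !copy_buf_from_guest(&op_args->buf[0], op_args->nr_bufs, &op, 0, sizeof(op)) )
        rc = -EFAULT;

    /* after: the size comes from sizeof(*(&op)); a short buffer is now an error */
    if ( !copy_from_guest_buf(&op, op_args, 0) )
        rc = -EFAULT;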

Signed-off-by: Andrew Cooper 
Signed-off-by: Jennifer Herbert 
--
CC: Paul Durrant 
CC: Andrew Cooper 
CC: Jan Beulich 
CC: Julien Grall 
---
 xen/arch/x86/hvm/dm.c | 47 +--
 1 file changed, 29 insertions(+), 18 deletions(-)

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index fb4bcec..3607ddb 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -32,36 +32,47 @@ struct dmop_args {
 struct xen_dm_op_buf buf[2];
 };
 
-static bool copy_buf_from_guest(const xen_dm_op_buf_t bufs[],
-unsigned int nr_bufs, void *dst,
-unsigned int idx, size_t dst_size)
+static bool _raw_copy_from_guest_buf(void *dst,
+ const struct dmop_args *args,
+ unsigned int buf_idx,
+ size_t dst_bytes)
 {
-size_t size;
+size_t buf_bytes;
 
-if ( idx >= nr_bufs )
+if ( buf_idx >= args->nr_bufs )
 return false;
 
-memset(dst, 0, dst_size);
+buf_bytes =  args->buf[buf_idx].size;
 
-size = min_t(size_t, dst_size, bufs[idx].size);
+if ( dst_bytes > buf_bytes )
+return false;
 
-return !copy_from_guest(dst, bufs[idx].h, size);
+return !copy_from_guest(dst, args->buf[buf_idx].h, buf_bytes);
 }
 
-static bool copy_buf_to_guest(const xen_dm_op_buf_t bufs[],
-  unsigned int nr_bufs, unsigned int idx,
-  const void *src, size_t src_size)
+static bool _raw_copy_to_guest_buf(struct dmop_args *args,
+   unsigned int buf_idx,
+   const void *src, size_t src_bytes)
 {
-size_t size;
+size_t buf_bytes;
 
-if ( idx >= nr_bufs )
+if ( buf_idx >= args->nr_bufs )
 return false;
 
-size = min_t(size_t, bufs[idx].size, src_size);
+buf_bytes = args->buf[buf_idx].size;
+
+if ( src_bytes > buf_bytes )
+return false;
 
-return !copy_to_guest(bufs[idx].h, src, size);
+return !copy_to_guest(args->buf[buf_idx].h, src, buf_bytes);
 }
 
+#define copy_from_guest_buf(dst, args, buf_idx) \
+_raw_copy_from_guest_buf(dst, args, buf_idx, sizeof(*(dst)))
+
+#define copy_to_guest_buf(args, buf_idx, src) \
+_raw_copy_to_guest_buf(args, buf_idx, src, sizeof(*(src)))
+
 static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn,
 unsigned int nr, struct xen_dm_op_buf *buf)
 {
@@ -312,7 +323,7 @@ static int dm_op(struct dmop_args *op_args)
 if ( rc )
 goto out;
 
-if ( !copy_buf_from_guest(&op_args->buf[0], op_args->nr_bufs, &op, 0, sizeof(op)) )
+if ( !copy_from_guest_buf(&op, op_args, 0) )
 {
 rc = -EFAULT;
 goto out;
@@ -568,8 +579,8 @@ static int dm_op(struct dmop_args *op_args)
 }
 
 if ( (!rc || rc == -ERESTART) &&
- !const_op &&
- !copy_buf_to_guest(&op_args->buf[0], op_args->nr_bufs, 0, &op, sizeof(op)) )
+ !const_op && !copy_to_guest_buf(op_args, 0, &op) )
+
 rc = -EFAULT;
 
  out:
-- 
2.1.4




[Xen-devel] [PATCH 3/4] hvm/dmop: Implement copy_{to, from}_guest_buf_offset() helpers

2017-04-20 Thread jennifer.herbert
From: Jennifer Herbert 

copy_{to,from}_guest_buf() are now implemented using an offset of 0.
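
As a sketch of the intended use (an assumption here; the real consumer
arrives in the next patch), the offset variants let a handler pull
individual array elements out of a guest buffer, e.g. the i-th extent
from buffer 1:

    struct xen_dm_op_modified_memory_extent extent;

    if ( !copy_from_guest_buf_offset(&extent, op_args, 1 /* buf index */,
                                     i * sizeof(extent)) )
        return -EFAULT;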

Signed-off-by: Andrew Cooper 
Signed-off-by: Jennifer Herbert 
--
CC: Paul Durrant 
CC: Andrew Cooper 
CC: Jan Beulich 
CC: Julien Grall 
---
 xen/arch/x86/hvm/dm.c | 48 +---
 1 file changed, 33 insertions(+), 15 deletions(-)

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 3607ddb..6990725 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -32,10 +32,11 @@ struct dmop_args {
 struct xen_dm_op_buf buf[2];
 };
 
-static bool _raw_copy_from_guest_buf(void *dst,
- const struct dmop_args *args,
- unsigned int buf_idx,
- size_t dst_bytes)
+static bool _raw_copy_from_guest_buf_offset(void *dst,
+const struct dmop_args *args,
+unsigned int buf_idx,
+size_t offset_bytes,
+size_t dst_bytes)
 {
 size_t buf_bytes;
 
@@ -44,15 +45,20 @@ static bool _raw_copy_from_guest_buf(void *dst,
 
 buf_bytes =  args->buf[buf_idx].size;
 
-if ( dst_bytes > buf_bytes )
+if ( offset_bytes >= buf_bytes ||
+ (offset_bytes + dst_bytes) < offset_bytes ||
+ (offset_bytes + dst_bytes) > buf_bytes )
 return false;
 
-return !copy_from_guest(dst, args->buf[buf_idx].h, buf_bytes);
+return !copy_from_guest_offset(dst, args->buf[buf_idx].h,
+   offset_bytes, dst_bytes);
 }
 
-static bool _raw_copy_to_guest_buf(struct dmop_args *args,
-   unsigned int buf_idx,
-   const void *src, size_t src_bytes)
+static bool _raw_copy_to_guest_buf_offset(struct dmop_args *args,
+  unsigned int buf_idx,
+  size_t offset_bytes,
+  const void *src,
+  size_t src_bytes)
 {
 size_t buf_bytes;
 
@@ -61,17 +67,29 @@ static bool _raw_copy_to_guest_buf(struct dmop_args *args,
 
 buf_bytes = args->buf[buf_idx].size;
 
-if ( src_bytes > buf_bytes )
+
+if ( offset_bytes >= buf_bytes ||
+ (offset_bytes + src_bytes) < offset_bytes ||
+ (offset_bytes + src_bytes) > buf_bytes )
 return false;
 
-return !copy_to_guest(args->buf[buf_idx].h, src, buf_bytes);
+return !copy_to_guest_offset(args->buf[buf_idx].h, offset_bytes,
+ src, src_bytes);
 }
 
-#define copy_from_guest_buf(dst, args, buf_idx) \
-_raw_copy_from_guest_buf(dst, args, buf_idx, sizeof(*(dst)))
+#define copy_from_guest_buf_offset(dst, bufs, buf_idx, offset_bytes) \
+_raw_copy_from_guest_buf_offset(dst, bufs, buf_idx, offset_bytes, \
+sizeof(*(dst)))
+
+#define copy_to_guest_buf_offset(bufs, buf_idx, offset_bytes, src) \
+_raw_copy_to_guest_buf_offset(bufs, buf_idx, offset_bytes, \
+  src, sizeof(*(src)))
+
+#define copy_from_guest_buf(dst, bufs, buf_idx) \
+copy_from_guest_buf_offset(dst, bufs, buf_idx, 0)
 
-#define copy_to_guest_buf(args, buf_idx, src) \
-_raw_copy_to_guest_buf(args, buf_idx, src, sizeof(*(src)))
+#define copy_to_guest_buf(bufs, buf_idx, src) \
+copy_to_guest_buf_offset(bufs, buf_idx, 0, src)
 
 static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn,
 unsigned int nr, struct xen_dm_op_buf *buf)
-- 
2.1.4




[Xen-devel] [PATCH 1/4] hvm/dmop: Box dmop_args rather than passing multiple parameters around

2017-04-20 Thread jennifer.herbert
From: Jennifer Herbert 

No functional change.

Signed-off-by: Jennifer Herbert 
Signed-off-by: Andrew Cooper 
--
CC: Paul Durrant 
CC: Andrew Cooper 
CC: Jan Beulich 
CC: Julien Grall 
---
 xen/arch/x86/hvm/dm.c | 47 ---
 1 file changed, 28 insertions(+), 19 deletions(-)

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index d72b7bd..fb4bcec 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -25,6 +25,13 @@
 
 #include 
 
+struct dmop_args {
+domid_t domid;
+unsigned int nr_bufs;
+/* Reserve enough buf elements for all current hypercalls. */
+struct xen_dm_op_buf buf[2];
+};
+
 static bool copy_buf_from_guest(const xen_dm_op_buf_t bufs[],
 unsigned int nr_bufs, void *dst,
 unsigned int idx, size_t dst_size)
@@ -287,16 +294,14 @@ static int inject_event(struct domain *d,
 return 0;
 }
 
-static int dm_op(domid_t domid,
- unsigned int nr_bufs,
- xen_dm_op_buf_t bufs[])
+static int dm_op(struct dmop_args *op_args)
 {
 struct domain *d;
 struct xen_dm_op op;
 bool const_op = true;
 long rc;
 
-rc = rcu_lock_remote_domain_by_id(domid, &d);
+rc = rcu_lock_remote_domain_by_id(op_args->domid, &d);
 if ( rc )
 return rc;
 
@@ -307,7 +312,7 @@ static int dm_op(domid_t domid,
 if ( rc )
 goto out;
 
-if ( !copy_buf_from_guest(bufs, nr_bufs, &op, 0, sizeof(op)) )
+if ( !copy_buf_from_guest(&op_args->buf[0], op_args->nr_bufs, &op, 0, sizeof(op)) )
 {
 rc = -EFAULT;
 goto out;
@@ -466,10 +471,10 @@ static int dm_op(domid_t domid,
 if ( data->pad )
 break;
 
-if ( nr_bufs < 2 )
+if ( op_args->nr_bufs < 2 )
 break;
 
-rc = track_dirty_vram(d, data->first_pfn, data->nr, &bufs[1]);
+rc = track_dirty_vram(d, data->first_pfn, data->nr, &op_args->buf[1]);
 break;
 }
 
@@ -564,7 +569,7 @@ static int dm_op(domid_t domid,
 
 if ( (!rc || rc == -ERESTART) &&
  !const_op &&
- !copy_buf_to_guest(bufs, nr_bufs, 0, &op, sizeof(op)) )
+ !copy_buf_to_guest(&op_args->buf[0], op_args->nr_bufs, 0, &op, sizeof(op)) )
 rc = -EFAULT;
 
  out:
@@ -587,20 +592,21 @@ CHECK_dm_op_set_mem_type;
 CHECK_dm_op_inject_event;
 CHECK_dm_op_inject_msi;
 
-#define MAX_NR_BUFS 2
-
 int compat_dm_op(domid_t domid,
  unsigned int nr_bufs,
  XEN_GUEST_HANDLE_PARAM(void) bufs)
 {
-struct xen_dm_op_buf nat[MAX_NR_BUFS];
+struct dmop_args args;
 unsigned int i;
 int rc;
 
-if ( nr_bufs > MAX_NR_BUFS )
+if ( nr_bufs > ARRAY_SIZE(args.buf) )
 return -E2BIG;
 
-for ( i = 0; i < nr_bufs; i++ )
+args.domid = domid;
+args.nr_bufs = nr_bufs;
+
+for ( i = 0; i < args.nr_bufs; i++ )
 {
 struct compat_dm_op_buf cmp;
 
@@ -610,12 +616,12 @@ int compat_dm_op(domid_t domid,
 #define XLAT_dm_op_buf_HNDL_h(_d_, _s_) \
 guest_from_compat_handle((_d_)->h, (_s_)->h)
 
-XLAT_dm_op_buf(&nat[i], &cmp);
+XLAT_dm_op_buf(&args.buf[i], &cmp);
 
 #undef XLAT_dm_op_buf_HNDL_h
 }
 
-rc = dm_op(domid, nr_bufs, nat);
+rc = dm_op(&args);
 
 if ( rc == -ERESTART )
 rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih",
@@ -628,16 +634,19 @@ long do_dm_op(domid_t domid,
   unsigned int nr_bufs,
   XEN_GUEST_HANDLE_PARAM(xen_dm_op_buf_t) bufs)
 {
-struct xen_dm_op_buf nat[MAX_NR_BUFS];
+struct dmop_args args;
 int rc;
 
-if ( nr_bufs > MAX_NR_BUFS )
+if ( nr_bufs > ARRAY_SIZE(args.buf) )
 return -E2BIG;
 
-if ( copy_from_guest_offset(nat, bufs, 0, nr_bufs) )
+args.domid = domid;
+args.nr_bufs = nr_bufs;
+
+if ( copy_from_guest_offset(&args.buf[0], bufs, 0, args.nr_bufs) )
 return -EFAULT;
 
-rc = dm_op(domid, nr_bufs, nat);
+rc = dm_op(&args);
 
 if ( rc == -ERESTART )
 rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih",
-- 
2.1.4




[Xen-devel] [PATCH v7 for-4.9 5/5] dmop: Add xendevicemodel_modified_memory_bulk()

2017-04-21 Thread jennifer.herbert
From: Jennifer Herbert 

This new devicemodel library call allows multiple extents of pages to be
marked as modified in a single call.  This is something needed for a
use case I'm working on.

The Xen side of the modified_memory call has been modified to accept
an array of extents.  The devicemodel library either provides a
single-element array, to support the original library function, or a
second function allows callers to provide their own array.
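
A minimal usage sketch (illustrative only; ranges[], nr_ranges and the
field names on it are assumptions): gathering a scatter list of dirty
ranges into one hypercall instead of one xendevicemodel_modified_memory()
call per range:

    struct xen_dm_op_modified_memory_extent extents[nr_ranges];
    unsigned int i;

    memset(extents, 0, sizeof(extents));  /* keep any padding fields zero */

    for ( i = 0; i < nr_ranges; i++ )
    {
        extents[i].first_pfn = ranges[i].base_pfn;
        extents[i].nr = ranges[i].count;
    }

    rc = xendevicemodel_modified_memory_bulk(dmod, domid, extents, nr_ranges);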

Signed-off-by: Jennifer Herbert 
Reviewed-by: Jan Beulich 
Reviewed-by: Paul Durrant 
Acked-by: Wei Liu 
--
CC: Paul Durrant 
CC: Andrew Cooper 
CC: Jan Beulich 
CC: Ian Jackson 
CC: Wei Liu 
CC: Julien Grall 

---
No change.
---
 tools/libs/devicemodel/core.c   |  30 --
 tools/libs/devicemodel/include/xendevicemodel.h |  19 +++-
 xen/arch/x86/hvm/dm.c   | 117 
 xen/include/public/hvm/dm_op.h  |  19 +++-
 4 files changed, 134 insertions(+), 51 deletions(-)

diff --git a/tools/libs/devicemodel/core.c b/tools/libs/devicemodel/core.c
index ff09819..d7c6476 100644
--- a/tools/libs/devicemodel/core.c
+++ b/tools/libs/devicemodel/core.c
@@ -459,22 +459,36 @@ int xendevicemodel_track_dirty_vram(
  dirty_bitmap, (size_t)(nr + 7) / 8);
 }
 
-int xendevicemodel_modified_memory(
-xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
-uint32_t nr)
+int xendevicemodel_modified_memory_bulk(
+xendevicemodel_handle *dmod, domid_t domid,
+struct xen_dm_op_modified_memory_extent *extents, uint32_t nr)
 {
 struct xen_dm_op op;
-struct xen_dm_op_modified_memory *data;
+struct xen_dm_op_modified_memory *header;
+size_t extents_size = nr * sizeof(struct xen_dm_op_modified_memory_extent);
 
 memset(&op, 0, sizeof(op));
 
 op.op = XEN_DMOP_modified_memory;
-data = &op.u.modified_memory;
+header = &op.u.modified_memory;
 
-data->first_pfn = first_pfn;
-data->nr = nr;
+header->nr_extents = nr;
+header->opaque = 0;
 
-return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
+return xendevicemodel_op(dmod, domid, 2, &op, sizeof(op),
+ extents, extents_size);
+}
+
+int xendevicemodel_modified_memory(
+xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
+uint32_t nr)
+{
+struct xen_dm_op_modified_memory_extent extent;
+
+extent.first_pfn = first_pfn;
+extent.nr = nr;
+
+return xendevicemodel_modified_memory_bulk(dmod, domid, &extent, 1);
 }
 
 int xendevicemodel_set_mem_type(
diff --git a/tools/libs/devicemodel/include/xendevicemodel.h b/tools/libs/devicemodel/include/xendevicemodel.h
index 1da216f..580fad2 100644
--- a/tools/libs/devicemodel/include/xendevicemodel.h
+++ b/tools/libs/devicemodel/include/xendevicemodel.h
@@ -254,8 +254,8 @@ int xendevicemodel_track_dirty_vram(
 uint32_t nr, unsigned long *dirty_bitmap);
 
 /**
- * This function notifies the hypervisor that a set of domain pages
- * have been modified.
+ * This function notifies the hypervisor that a set of contiguous
+ * domain pages have been modified.
  *
  * @parm dmod a handle to an open devicemodel interface.
  * @parm domid the domain id to be serviced
@@ -268,6 +268,21 @@ int xendevicemodel_modified_memory(
 uint32_t nr);
 
 /**
+ * This function notifies the hypervisor that a set of discontiguous
+ * domain pages have been modified.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm extents an array of extent structs, each of which holds
+ * a first_pfn and nr (number of pfns).
+ * @parm nr the number of extents in the array
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_modified_memory_bulk(
+xendevicemodel_handle *dmod, domid_t domid,
+struct xen_dm_op_modified_memory_extent extents[], uint32_t nr);
+
+/**
  * This function notifies the hypervisor that a set of domain pages
  * are to be treated in a specific way. (See the definition of
  * hvmmem_type_t).
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 196729a..ab181d8 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -153,56 +153,102 @@ static int set_isa_irq_level(struct domain *d, uint8_t isa_irq,
 }
 
 static int modified_memory(struct domain *d,
-   struct xen_dm_op_modified_memory *data)
+   const struct dmop_args *bufs,
+   struct xen_dm_op_modified_memory *header)
 {
-xen_pfn_t last_pfn = data->first_pfn + data->nr - 1;
-unsigned int iter = 0;
-int rc = 0;
+#define EXTENTS_BUFFER 1
 
-if ( (data->first_pfn > last_pfn) ||
- (last_pfn > domain_get_maximum_gpfn(d)) )
-return -EINVAL;
+/* Process maximum of 256 pfns before checking for continuation. */
+const unsigned int cont_check_interval = 0x100;
+unsigned int *rem_extents =  &header->nr_extents;
+unsigned 

[Xen-devel] [PATCH v7 for-4.9 1/5] hvm/dmop: Box dmop_args rather than passing multiple parameters around

2017-04-21 Thread jennifer.herbert
From: Jennifer Herbert 

No functional change.

Signed-off-by: Jennifer Herbert 
Signed-off-by: Andrew Cooper 
Reviewed-by: Jan Beulich 
Reviewed-by: Paul Durrant 

--
CC: Paul Durrant 
CC: Andrew Cooper 
CC: Jan Beulich 
CC: Julien Grall 
---
dm_op now takes a const struct.
---
 xen/arch/x86/hvm/dm.c | 49 +
 1 file changed, 29 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index d72b7bd..e583e41 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -25,6 +25,13 @@
 
 #include 
 
+struct dmop_args {
+domid_t domid;
+unsigned int nr_bufs;
+/* Reserve enough buf elements for all current hypercalls. */
+struct xen_dm_op_buf buf[2];
+};
+
 static bool copy_buf_from_guest(const xen_dm_op_buf_t bufs[],
 unsigned int nr_bufs, void *dst,
 unsigned int idx, size_t dst_size)
@@ -56,7 +63,7 @@ static bool copy_buf_to_guest(const xen_dm_op_buf_t bufs[],
 }
 
 static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn,
-unsigned int nr, struct xen_dm_op_buf *buf)
+unsigned int nr, const struct xen_dm_op_buf *buf)
 {
 if ( nr > (GB(1) >> PAGE_SHIFT) )
 return -EINVAL;
@@ -287,16 +294,14 @@ static int inject_event(struct domain *d,
 return 0;
 }
 
-static int dm_op(domid_t domid,
- unsigned int nr_bufs,
- xen_dm_op_buf_t bufs[])
+static int dm_op(const struct dmop_args *op_args)
 {
 struct domain *d;
 struct xen_dm_op op;
 bool const_op = true;
 long rc;
 
-rc = rcu_lock_remote_domain_by_id(domid, &d);
+rc = rcu_lock_remote_domain_by_id(op_args->domid, &d);
 if ( rc )
 return rc;
 
@@ -307,7 +312,7 @@ static int dm_op(domid_t domid,
 if ( rc )
 goto out;
 
-if ( !copy_buf_from_guest(bufs, nr_bufs, &op, 0, sizeof(op)) )
+if ( !copy_buf_from_guest(&op_args->buf[0], op_args->nr_bufs, &op, 0, sizeof(op)) )
 {
 rc = -EFAULT;
 goto out;
@@ -466,10 +471,10 @@ static int dm_op(domid_t domid,
 if ( data->pad )
 break;
 
-if ( nr_bufs < 2 )
+if ( op_args->nr_bufs < 2 )
 break;
 
-rc = track_dirty_vram(d, data->first_pfn, data->nr, &bufs[1]);
+rc = track_dirty_vram(d, data->first_pfn, data->nr, &op_args->buf[1]);
 break;
 }
 
@@ -564,7 +569,7 @@ static int dm_op(domid_t domid,
 
 if ( (!rc || rc == -ERESTART) &&
  !const_op &&
- !copy_buf_to_guest(bufs, nr_bufs, 0, &op, sizeof(op)) )
+ !copy_buf_to_guest(&op_args->buf[0], op_args->nr_bufs, 0, &op, sizeof(op)) )
 rc = -EFAULT;
 
  out:
@@ -587,20 +592,21 @@ CHECK_dm_op_set_mem_type;
 CHECK_dm_op_inject_event;
 CHECK_dm_op_inject_msi;
 
-#define MAX_NR_BUFS 2
-
 int compat_dm_op(domid_t domid,
  unsigned int nr_bufs,
  XEN_GUEST_HANDLE_PARAM(void) bufs)
 {
-struct xen_dm_op_buf nat[MAX_NR_BUFS];
+struct dmop_args args;
 unsigned int i;
 int rc;
 
-if ( nr_bufs > MAX_NR_BUFS )
+if ( nr_bufs > ARRAY_SIZE(args.buf) )
 return -E2BIG;
 
-for ( i = 0; i < nr_bufs; i++ )
+args.domid = domid;
+args.nr_bufs = nr_bufs;
+
+for ( i = 0; i < args.nr_bufs; i++ )
 {
 struct compat_dm_op_buf cmp;
 
@@ -610,12 +616,12 @@ int compat_dm_op(domid_t domid,
 #define XLAT_dm_op_buf_HNDL_h(_d_, _s_) \
 guest_from_compat_handle((_d_)->h, (_s_)->h)
 
-XLAT_dm_op_buf(&nat[i], &cmp);
+XLAT_dm_op_buf(&args.buf[i], &cmp);
 
 #undef XLAT_dm_op_buf_HNDL_h
 }
 
-rc = dm_op(domid, nr_bufs, nat);
+rc = dm_op(&args);
 
 if ( rc == -ERESTART )
 rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih",
@@ -628,16 +634,19 @@ long do_dm_op(domid_t domid,
   unsigned int nr_bufs,
   XEN_GUEST_HANDLE_PARAM(xen_dm_op_buf_t) bufs)
 {
-struct xen_dm_op_buf nat[MAX_NR_BUFS];
+struct dmop_args args;
 int rc;
 
-if ( nr_bufs > MAX_NR_BUFS )
+if ( nr_bufs > ARRAY_SIZE(args.buf) )
 return -E2BIG;
 
-if ( copy_from_guest_offset(nat, bufs, 0, nr_bufs) )
+args.domid = domid;
+args.nr_bufs = nr_bufs;
+
+if ( copy_from_guest_offset(&args.buf[0], bufs, 0, args.nr_bufs) )
 return -EFAULT;
 
-rc = dm_op(domid, nr_bufs, nat);
+rc = dm_op(&args);
 
 if ( rc == -ERESTART )
 rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih",
-- 
2.1.4




[Xen-devel] [PATCH v7 for-4.9 4/5] hvm/dmop: Implement copy_{to, from}_guest_buf_offset() helpers

2017-04-21 Thread jennifer.herbert
From: Andrew Cooper 

copy_{to,from}_guest_buf() are now implemented using an offset of 0.

Signed-off-by: Andrew Cooper 
Signed-off-by: Jennifer Herbert 
Reviewed-by: Paul Durrant 
Reviewed-by: Jan Beulich 

--
CC: Paul Durrant 
CC: Andrew Cooper 
CC: Jan Beulich 
CC: Julien Grall 

---
No change.
---
 xen/arch/x86/hvm/dm.c | 46 +++---
 1 file changed, 31 insertions(+), 15 deletions(-)

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index f29a290..196729a 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -32,10 +32,11 @@ struct dmop_args {
 struct xen_dm_op_buf buf[2];
 };
 
-static bool _raw_copy_from_guest_buf(void *dst,
- const struct dmop_args *args,
- unsigned int buf_idx,
- size_t dst_bytes)
+static bool _raw_copy_from_guest_buf_offset(void *dst,
+const struct dmop_args *args,
+unsigned int buf_idx,
+size_t offset_bytes,
+size_t dst_bytes)
 {
 size_t buf_bytes;
 
@@ -44,15 +45,19 @@ static bool _raw_copy_from_guest_buf(void *dst,
 
 buf_bytes =  args->buf[buf_idx].size;
 
-if ( dst_bytes > buf_bytes )
+if ( (offset_bytes + dst_bytes) < offset_bytes ||
+ (offset_bytes + dst_bytes) > buf_bytes )
 return false;
 
-return !copy_from_guest(dst, args->buf[buf_idx].h, buf_bytes);
+return !copy_from_guest_offset(dst, args->buf[buf_idx].h,
+   offset_bytes, dst_bytes);
 }
 
-static bool _raw_copy_to_guest_buf(const struct dmop_args *args,
-   unsigned int buf_idx,
-   const void *src, size_t src_bytes)
+static bool _raw_copy_to_guest_buf_offset(const struct dmop_args *args,
+  unsigned int buf_idx,
+  size_t offset_bytes,
+  const void *src,
+  size_t src_bytes)
 {
 size_t buf_bytes;
 
@@ -61,17 +66,28 @@ static bool _raw_copy_to_guest_buf(const struct dmop_args *args,
 
 buf_bytes = args->buf[buf_idx].size;
 
-if ( src_bytes > buf_bytes )
+
+if ( (offset_bytes + src_bytes) < offset_bytes ||
+ (offset_bytes + src_bytes) > buf_bytes )
 return false;
 
-return !copy_to_guest(args->buf[buf_idx].h, src, buf_bytes);
+return !copy_to_guest_offset(args->buf[buf_idx].h, offset_bytes,
+ src, src_bytes);
 }
 
-#define COPY_FROM_GUEST_BUF(dst, args, buf_idx) \
-_raw_copy_from_guest_buf(&dst, args, buf_idx, sizeof(dst))
+#define COPY_FROM_GUEST_BUF_OFFSET(dst, bufs, buf_idx, offset_bytes) \
+_raw_copy_from_guest_buf_offset(&dst, bufs, buf_idx, offset_bytes, \
+sizeof(dst))
+
+#define COPY_TO_GUEST_BUF_OFFSET(bufs, buf_idx, offset_bytes, src) \
+_raw_copy_to_guest_buf_offset(bufs, buf_idx, offset_bytes, \
+  src, sizeof(*(src)))
+
+#define COPY_FROM_GUEST_BUF(dst, bufs, buf_idx) \
+COPY_FROM_GUEST_BUF_OFFSET(dst, bufs, buf_idx, 0)
 
-#define COPY_TO_GUEST_BUF(args, buf_idx, src) \
-_raw_copy_to_guest_buf(args, buf_idx, &src, sizeof(src))
+#define COPY_TO_GUEST_BUF(bufs, buf_idx, src) \
+COPY_TO_GUEST_BUF_OFFSET(bufs, buf_idx, 0, &src)
 
 static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn,
 unsigned int nr, const struct xen_dm_op_buf *buf)
-- 
2.1.4




[Xen-devel] [PATCH v7 for-4.9 3/5] hvm/dmop: Implement copy_{to, from}_guest_buf() in terms of raw accessors

2017-04-21 Thread jennifer.herbert
From: Andrew Cooper 

This also allows the usual cases to be simplified, by omitting unnecessary
buf parameters, and because the macros can appropriately size the object.

Signed-off-by: Andrew Cooper 
Signed-off-by: Jennifer Herbert 
--
CC: Paul Durrant 
CC: Andrew Cooper 
CC: Jan Beulich 
CC: Julien Grall 
---
This patch is now separate from the patch that changes behaviour.
It also has macros that take the object and not a pointer to it,
as this was considered a potential trap.
To prevent the semantics confusing people, they are more explicitly
macros, by capitalising them.
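
To make the object-versus-pointer point concrete, a condensed call-site
sketch (the underlying _raw_ helpers still take pointers; the capitalised
macro applies & and sizeof itself):

    struct xen_dm_op op;

    if ( !COPY_FROM_GUEST_BUF(op, op_args, 0) )
        rc = -EFAULT;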
---
 xen/arch/x86/hvm/dm.c | 43 ++-
 1 file changed, 26 insertions(+), 17 deletions(-)

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 63aa46c..f29a290 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -32,38 +32,47 @@ struct dmop_args {
 struct xen_dm_op_buf buf[2];
 };
 
-static bool copy_buf_from_guest(const xen_dm_op_buf_t bufs[],
-unsigned int nr_bufs, void *dst,
-unsigned int idx, size_t dst_size)
+static bool _raw_copy_from_guest_buf(void *dst,
+ const struct dmop_args *args,
+ unsigned int buf_idx,
+ size_t dst_bytes)
 {
 size_t buf_bytes;
 
-if ( idx >= nr_bufs )
+if ( buf_idx >= args->nr_bufs )
 return false;
 
-buf_bytes = bufs[idx].size;
-if ( dst_size > buf_bytes )
+buf_bytes =  args->buf[buf_idx].size;
+
+if ( dst_bytes > buf_bytes )
 return false;
 
-return !copy_from_guest(dst, bufs[idx].h, buf_bytes);
+return !copy_from_guest(dst, args->buf[buf_idx].h, buf_bytes);
 }
 
-static bool copy_buf_to_guest(const xen_dm_op_buf_t bufs[],
-  unsigned int nr_bufs, unsigned int idx,
-  const void *src, size_t src_size)
+static bool _raw_copy_to_guest_buf(const struct dmop_args *args,
+   unsigned int buf_idx,
+   const void *src, size_t src_bytes)
 {
 size_t buf_bytes;
 
-if ( idx >= nr_bufs )
+if ( buf_idx >= args->nr_bufs )
 return false;
 
-buf_bytes = bufs[idx].size;
-if ( src_size > buf_bytes )
+buf_bytes = args->buf[buf_idx].size;
+
+if ( src_bytes > buf_bytes )
 return false;
 
-return !copy_to_guest(bufs[idx].h, src, buf_bytes);
+return !copy_to_guest(args->buf[buf_idx].h, src, buf_bytes);
 }
 
+#define COPY_FROM_GUEST_BUF(dst, args, buf_idx) \
+_raw_copy_from_guest_buf(&dst, args, buf_idx, sizeof(dst))
+
+#define COPY_TO_GUEST_BUF(args, buf_idx, src) \
+_raw_copy_to_guest_buf(args, buf_idx, &src, sizeof(src))
+
 static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn,
 unsigned int nr, const struct xen_dm_op_buf *buf)
 {
@@ -314,7 +323,7 @@ static int dm_op(const struct dmop_args *op_args)
 if ( rc )
 goto out;
 
-if ( !copy_buf_from_guest(&op_args->buf[0], op_args->nr_bufs, &op, 0, sizeof(op)) )
+if ( !COPY_FROM_GUEST_BUF(op, op_args, 0) )
 {
 rc = -EFAULT;
 goto out;
@@ -570,8 +579,8 @@ static int dm_op(const struct dmop_args *op_args)
 }
 
 if ( (!rc || rc == -ERESTART) &&
- !const_op &&
- !copy_buf_to_guest(&op_args->buf[0], op_args->nr_bufs, 0, &op, sizeof(op)) )
+ !const_op && !COPY_TO_GUEST_BUF(op_args, 0, op) )
+
 rc = -EFAULT;
 
  out:
-- 
2.1.4




[Xen-devel] [PATCH v7 for-4.9 2/5] hvm/dmop: Make copy_buf_{from, to}_guest for a buffer not big enough an error.

2017-04-21 Thread jennifer.herbert
From: Jennifer Herbert 

This makes copying to or from a buf that isn't big enough an error.
If the buffer isn't big enough, trying to carry on regardless
can only cause trouble later on.
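
For example (an illustrative scenario, not code from the patch): a guest
handing dm_op() a 4-byte buffer for the larger struct xen_dm_op used to
get the op zero-padded and accepted; the copy now fails, so dm_op()
returns -EFAULT rather than acting on a mostly-blank op:

    xen_dm_op_buf_t small = { .size = 4 };  /* .h handle omitted for brevity */
    struct xen_dm_op op;

    /* old behaviour: returns true with op zero-padded;
     * new behaviour: returns false, and the caller reports -EFAULT. */
    copy_buf_from_guest(&small, 1, &op, 0, sizeof(op));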

Signed-off-by: Jennifer Herbert 
--
CC: Paul Durrant 
CC: Andrew Cooper 
CC: Jan Beulich 
CC: Julien Grall 
---
This patch takes the behaviour-change part of the previous
[patch 2/4].
---
 xen/arch/x86/hvm/dm.c | 18 ++
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index e583e41..63aa46c 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -36,30 +36,32 @@ static bool copy_buf_from_guest(const xen_dm_op_buf_t bufs[],
 unsigned int nr_bufs, void *dst,
 unsigned int idx, size_t dst_size)
 {
-size_t size;
+size_t buf_bytes;
 
 if ( idx >= nr_bufs )
 return false;
 
-memset(dst, 0, dst_size);
-
-size = min_t(size_t, dst_size, bufs[idx].size);
+buf_bytes = bufs[idx].size;
+if ( dst_size > buf_bytes )
+return false;
 
-return !copy_from_guest(dst, bufs[idx].h, size);
+return !copy_from_guest(dst, bufs[idx].h, buf_bytes);
 }
 
 static bool copy_buf_to_guest(const xen_dm_op_buf_t bufs[],
   unsigned int nr_bufs, unsigned int idx,
   const void *src, size_t src_size)
 {
-size_t size;
+size_t buf_bytes;
 
 if ( idx >= nr_bufs )
 return false;
 
-size = min_t(size_t, bufs[idx].size, src_size);
+buf_bytes = bufs[idx].size;
+if ( src_size > buf_bytes )
+return false;
 
-return !copy_to_guest(bufs[idx].h, src, size);
+return !copy_to_guest(bufs[idx].h, src, buf_bytes);
 }
 
 static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn,
-- 
2.1.4




[Xen-devel] [PATCH v8 for-4.9 4/5] hvm/dmop: Implement copy_{to, from}_guest_buf_offset() helpers

2017-04-21 Thread jennifer.herbert
From: Andrew Cooper 

copy_{to,from}_guest_buf() are now implemented using an offset of 0.

Signed-off-by: Andrew Cooper 
Signed-off-by: Jennifer Herbert 
Reviewed-by: Paul Durrant 
Reviewed-by: Jan Beulich 

--
CC: Paul Durrant 
CC: Andrew Cooper 
CC: Jan Beulich 
CC: Julien Grall 

--
Rebased
---
 xen/arch/x86/hvm/dm.c | 46 +++---
 1 file changed, 31 insertions(+), 15 deletions(-)

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index b31c252..c91895f 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -32,10 +32,11 @@ struct dmop_args {
 struct xen_dm_op_buf buf[2];
 };
 
-static bool _raw_copy_from_guest_buf(void *dst,
- const struct dmop_args *args,
- unsigned int buf_idx,
- size_t dst_bytes)
+static bool _raw_copy_from_guest_buf_offset(void *dst,
+const struct dmop_args *args,
+unsigned int buf_idx,
+size_t offset_bytes,
+size_t dst_bytes)
 {
 size_t buf_bytes;
 
@@ -44,15 +45,19 @@ static bool _raw_copy_from_guest_buf(void *dst,
 
 buf_bytes =  args->buf[buf_idx].size;
 
-if ( dst_bytes > buf_bytes )
+if ( (offset_bytes + dst_bytes) < offset_bytes ||
+ (offset_bytes + dst_bytes) > buf_bytes )
 return false;
 
-return !copy_from_guest(dst, args->buf[buf_idx].h, dst_bytes);
+return !copy_from_guest_offset(dst, args->buf[buf_idx].h,
+   offset_bytes, dst_bytes);
 }
 
-static bool _raw_copy_to_guest_buf(const struct dmop_args *args,
-   unsigned int buf_idx,
-   const void *src, size_t src_bytes)
+static bool _raw_copy_to_guest_buf_offset(const struct dmop_args *args,
+  unsigned int buf_idx,
+  size_t offset_bytes,
+  const void *src,
+  size_t src_bytes)
 {
 size_t buf_bytes;
 
@@ -61,17 +66,28 @@ static bool _raw_copy_to_guest_buf(const struct dmop_args *args,
 
 buf_bytes = args->buf[buf_idx].size;
 
-if ( src_bytes > buf_bytes )
+
+if ( (offset_bytes + src_bytes) < offset_bytes ||
+ (offset_bytes + src_bytes) > buf_bytes )
 return false;
 
-return !copy_to_guest(args->buf[buf_idx].h, src, src_bytes);
+return !copy_to_guest_offset(args->buf[buf_idx].h, offset_bytes,
+ src, src_bytes);
 }
 
-#define COPY_FROM_GUEST_BUF(dst, args, buf_idx) \
-_raw_copy_from_guest_buf(&dst, args, buf_idx, sizeof(dst))
+#define COPY_FROM_GUEST_BUF_OFFSET(dst, bufs, buf_idx, offset_bytes) \
+_raw_copy_from_guest_buf_offset(&dst, bufs, buf_idx, offset_bytes, \
+sizeof(dst))
+
+#define COPY_TO_GUEST_BUF_OFFSET(bufs, buf_idx, offset_bytes, src) \
+_raw_copy_to_guest_buf_offset(bufs, buf_idx, offset_bytes, \
+  &src, sizeof(src))
+
+#define COPY_FROM_GUEST_BUF(dst, bufs, buf_idx) \
+COPY_FROM_GUEST_BUF_OFFSET(dst, bufs, buf_idx, 0)
 
-#define COPY_TO_GUEST_BUF(args, buf_idx, src) \
-_raw_copy_to_guest_buf(args, buf_idx, &src, sizeof(src))
+#define COPY_TO_GUEST_BUF(bufs, buf_idx, src) \
+COPY_TO_GUEST_BUF_OFFSET(bufs, buf_idx, 0, src)
 
 static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn,
 unsigned int nr, const struct xen_dm_op_buf *buf)
-- 
2.1.4




[Xen-devel] [PATCH v8 for-4.9 2/5] hvm/dmop: Make copy_buf_{from, to}_guest for a buffer not big enough an error.

2017-04-21 Thread jennifer.herbert
From: Jennifer Herbert 

This makes copying to or from a buf that isn't big enough an error.
If the buffer isn't big enough, trying to carry on regardless
can only cause trouble later on.

Signed-off-by: Jennifer Herbert 
--
CC: Paul Durrant 
CC: Andrew Cooper 
CC: Jan Beulich 
CC: Julien Grall 
---
Switch buf_bytes to {dst, src}_bytes for copy_{from,to}_guest
---
 xen/arch/x86/hvm/dm.c | 18 ++
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index e583e41..89186d2 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -36,30 +36,32 @@ static bool copy_buf_from_guest(const xen_dm_op_buf_t bufs[],
 unsigned int nr_bufs, void *dst,
 unsigned int idx, size_t dst_size)
 {
-size_t size;
+size_t buf_bytes;
 
 if ( idx >= nr_bufs )
 return false;
 
-memset(dst, 0, dst_size);
-
-size = min_t(size_t, dst_size, bufs[idx].size);
+buf_bytes = bufs[idx].size;
+if ( dst_size > buf_bytes )
+return false;
 
-return !copy_from_guest(dst, bufs[idx].h, size);
+return !copy_from_guest(dst, bufs[idx].h, dst_size);
 }
 
 static bool copy_buf_to_guest(const xen_dm_op_buf_t bufs[],
   unsigned int nr_bufs, unsigned int idx,
   const void *src, size_t src_size)
 {
-size_t size;
+size_t buf_bytes;
 
 if ( idx >= nr_bufs )
 return false;
 
-size = min_t(size_t, bufs[idx].size, src_size);
+buf_bytes = bufs[idx].size;
+if ( src_size > buf_bytes )
+return false;
 
-return !copy_to_guest(bufs[idx].h, src, size);
+return !copy_to_guest(bufs[idx].h, src, src_size);
 }
 
 static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn,
-- 
2.1.4




[Xen-devel] [PATCH v8 for-4.9 1/5] hvm/dmop: Box dmop_args rather than passing multiple parameters around

2017-04-21 Thread jennifer.herbert
From: Jennifer Herbert 

No functional change.

Signed-off-by: Jennifer Herbert 
Signed-off-by: Andrew Cooper 
Reviewed-by: Jan Beulich 
Reviewed-by: Paul Durrant 

--
CC: Paul Durrant 
CC: Andrew Cooper 
CC: Jan Beulich 
CC: Julien Grall 
---
No change.
---
 xen/arch/x86/hvm/dm.c | 49 +
 1 file changed, 29 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index d72b7bd..e583e41 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -25,6 +25,13 @@
 
 #include 
 
+struct dmop_args {
+domid_t domid;
+unsigned int nr_bufs;
+/* Reserve enough buf elements for all current hypercalls. */
+struct xen_dm_op_buf buf[2];
+};
+
 static bool copy_buf_from_guest(const xen_dm_op_buf_t bufs[],
 unsigned int nr_bufs, void *dst,
 unsigned int idx, size_t dst_size)
@@ -56,7 +63,7 @@ static bool copy_buf_to_guest(const xen_dm_op_buf_t bufs[],
 }
 
 static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn,
-unsigned int nr, struct xen_dm_op_buf *buf)
+unsigned int nr, const struct xen_dm_op_buf *buf)
 {
 if ( nr > (GB(1) >> PAGE_SHIFT) )
 return -EINVAL;
@@ -287,16 +294,14 @@ static int inject_event(struct domain *d,
 return 0;
 }
 
-static int dm_op(domid_t domid,
- unsigned int nr_bufs,
- xen_dm_op_buf_t bufs[])
+static int dm_op(const struct dmop_args *op_args)
 {
 struct domain *d;
 struct xen_dm_op op;
 bool const_op = true;
 long rc;
 
-rc = rcu_lock_remote_domain_by_id(domid, &d);
+rc = rcu_lock_remote_domain_by_id(op_args->domid, &d);
 if ( rc )
 return rc;
 
@@ -307,7 +312,7 @@ static int dm_op(domid_t domid,
 if ( rc )
 goto out;
 
-if ( !copy_buf_from_guest(bufs, nr_bufs, &op, 0, sizeof(op)) )
+if ( !copy_buf_from_guest(&op_args->buf[0], op_args->nr_bufs, &op, 0, sizeof(op)) )
 {
 rc = -EFAULT;
 goto out;
@@ -466,10 +471,10 @@ static int dm_op(domid_t domid,
 if ( data->pad )
 break;
 
-if ( nr_bufs < 2 )
+if ( op_args->nr_bufs < 2 )
 break;
 
-rc = track_dirty_vram(d, data->first_pfn, data->nr, &bufs[1]);
+rc = track_dirty_vram(d, data->first_pfn, data->nr, &op_args->buf[1]);
 break;
 }
 
@@ -564,7 +569,7 @@ static int dm_op(domid_t domid,
 
 if ( (!rc || rc == -ERESTART) &&
  !const_op &&
- !copy_buf_to_guest(bufs, nr_bufs, 0, &op, sizeof(op)) )
+ !copy_buf_to_guest(&op_args->buf[0], op_args->nr_bufs, 0, &op, sizeof(op)) )
 rc = -EFAULT;
 
  out:
@@ -587,20 +592,21 @@ CHECK_dm_op_set_mem_type;
 CHECK_dm_op_inject_event;
 CHECK_dm_op_inject_msi;
 
-#define MAX_NR_BUFS 2
-
 int compat_dm_op(domid_t domid,
  unsigned int nr_bufs,
  XEN_GUEST_HANDLE_PARAM(void) bufs)
 {
-struct xen_dm_op_buf nat[MAX_NR_BUFS];
+struct dmop_args args;
 unsigned int i;
 int rc;
 
-if ( nr_bufs > MAX_NR_BUFS )
+if ( nr_bufs > ARRAY_SIZE(args.buf) )
 return -E2BIG;
 
-for ( i = 0; i < nr_bufs; i++ )
+args.domid = domid;
+args.nr_bufs = nr_bufs;
+
+for ( i = 0; i < args.nr_bufs; i++ )
 {
 struct compat_dm_op_buf cmp;
 
@@ -610,12 +616,12 @@ int compat_dm_op(domid_t domid,
 #define XLAT_dm_op_buf_HNDL_h(_d_, _s_) \
 guest_from_compat_handle((_d_)->h, (_s_)->h)
 
-XLAT_dm_op_buf(&nat[i], &cmp);
+XLAT_dm_op_buf(&args.buf[i], &cmp);
 
 #undef XLAT_dm_op_buf_HNDL_h
 }
 
-rc = dm_op(domid, nr_bufs, nat);
+rc = dm_op(&args);
 
 if ( rc == -ERESTART )
 rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih",
@@ -628,16 +634,19 @@ long do_dm_op(domid_t domid,
   unsigned int nr_bufs,
   XEN_GUEST_HANDLE_PARAM(xen_dm_op_buf_t) bufs)
 {
-struct xen_dm_op_buf nat[MAX_NR_BUFS];
+struct dmop_args args;
 int rc;
 
-if ( nr_bufs > MAX_NR_BUFS )
+if ( nr_bufs > ARRAY_SIZE(args.buf) )
 return -E2BIG;
 
-if ( copy_from_guest_offset(nat, bufs, 0, nr_bufs) )
+args.domid = domid;
+args.nr_bufs = nr_bufs;
+
+if ( copy_from_guest_offset(&args.buf[0], bufs, 0, args.nr_bufs) )
 return -EFAULT;
 
-rc = dm_op(domid, nr_bufs, nat);
+rc = dm_op(&args);
 
 if ( rc == -ERESTART )
 rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih",
-- 
2.1.4




[Xen-devel] [PATCH v8 for-4.9 5/5] dmop: Add xendevicemodel_modified_memory_bulk()

2017-04-21 Thread jennifer.herbert
From: Jennifer Herbert 

This new devicemodel library call allows multiple extents of pages to be
marked as modified in a single call.  This is something needed for a
use case I'm working on.

The Xen side of the modified_memory call has been modified to accept
an array of extents.  The devicemodel library either provides a
single-element array, to support the original library function, or a
second function allows callers to provide their own array.

Signed-off-by: Jennifer Herbert 
Reviewed-by: Jan Beulich 
Reviewed-by: Paul Durrant 
Acked-by: Wei Liu 
--
CC: Paul Durrant 
CC: Andrew Cooper 
CC: Jan Beulich 
CC: Ian Jackson 
CC: Wei Liu 
CC: Julien Grall 

--
No change
---
 tools/libs/devicemodel/core.c   |  30 --
 tools/libs/devicemodel/include/xendevicemodel.h |  19 +++-
 xen/arch/x86/hvm/dm.c   | 117 
 xen/include/public/hvm/dm_op.h  |  19 +++-
 4 files changed, 134 insertions(+), 51 deletions(-)

diff --git a/tools/libs/devicemodel/core.c b/tools/libs/devicemodel/core.c
index ff09819..d7c6476 100644
--- a/tools/libs/devicemodel/core.c
+++ b/tools/libs/devicemodel/core.c
@@ -459,22 +459,36 @@ int xendevicemodel_track_dirty_vram(
  dirty_bitmap, (size_t)(nr + 7) / 8);
 }
 
-int xendevicemodel_modified_memory(
-xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
-uint32_t nr)
+int xendevicemodel_modified_memory_bulk(
+xendevicemodel_handle *dmod, domid_t domid,
+struct xen_dm_op_modified_memory_extent *extents, uint32_t nr)
 {
 struct xen_dm_op op;
-struct xen_dm_op_modified_memory *data;
+struct xen_dm_op_modified_memory *header;
+size_t extents_size = nr * sizeof(struct xen_dm_op_modified_memory_extent);
 
 memset(&op, 0, sizeof(op));
 
 op.op = XEN_DMOP_modified_memory;
-data = &op.u.modified_memory;
+header = &op.u.modified_memory;
 
-data->first_pfn = first_pfn;
-data->nr = nr;
+header->nr_extents = nr;
+header->opaque = 0;
 
-return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
+return xendevicemodel_op(dmod, domid, 2, &op, sizeof(op),
+ extents, extents_size);
+}
+
+int xendevicemodel_modified_memory(
+xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
+uint32_t nr)
+{
+struct xen_dm_op_modified_memory_extent extent;
+
+extent.first_pfn = first_pfn;
+extent.nr = nr;
+
+return xendevicemodel_modified_memory_bulk(dmod, domid, &extent, 1);
 }
 
 int xendevicemodel_set_mem_type(
diff --git a/tools/libs/devicemodel/include/xendevicemodel.h b/tools/libs/devicemodel/include/xendevicemodel.h
index 1da216f..580fad2 100644
--- a/tools/libs/devicemodel/include/xendevicemodel.h
+++ b/tools/libs/devicemodel/include/xendevicemodel.h
@@ -254,8 +254,8 @@ int xendevicemodel_track_dirty_vram(
 uint32_t nr, unsigned long *dirty_bitmap);
 
 /**
- * This function notifies the hypervisor that a set of domain pages
- * have been modified.
+ * This function notifies the hypervisor that a set of contiguous
+ * domain pages have been modified.
  *
  * @parm dmod a handle to an open devicemodel interface.
  * @parm domid the domain id to be serviced
@@ -268,6 +268,21 @@ int xendevicemodel_modified_memory(
 uint32_t nr);
 
 /**
+ * This function notifies the hypervisor that a set of discontiguous
+ * domain pages have been modified.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm extents an array of extent structs, each of which holds
+ * a first_pfn and nr (number of pfns).
+ * @parm nr the number of extents in the array
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_modified_memory_bulk(
+xendevicemodel_handle *dmod, domid_t domid,
+struct xen_dm_op_modified_memory_extent extents[], uint32_t nr);
+
+/**
  * This function notifies the hypervisor that a set of domain pages
  * are to be treated in a specific way. (See the definition of
  * hvmmem_type_t).
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index c91895f..b1f547b 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -153,56 +153,102 @@ static int set_isa_irq_level(struct domain *d, uint8_t isa_irq,
 }
 
 static int modified_memory(struct domain *d,
-   struct xen_dm_op_modified_memory *data)
+   const struct dmop_args *bufs,
+   struct xen_dm_op_modified_memory *header)
 {
-xen_pfn_t last_pfn = data->first_pfn + data->nr - 1;
-unsigned int iter = 0;
-int rc = 0;
+#define EXTENTS_BUFFER 1
 
-if ( (data->first_pfn > last_pfn) ||
- (last_pfn > domain_get_maximum_gpfn(d)) )
-return -EINVAL;
+/* Process maximum of 256 pfns before checking for continuation. */
+const unsigned int cont_check_interval = 0x100;
+unsigned int *rem_extents =  &header->nr_extents;
+unsigned in

[Xen-devel] [PATCH v8 for-4.9 3/5] hvm/dmop: Implement copy_{to, from}_guest_buf() in terms of raw accessors

2017-04-21 Thread jennifer.herbert
From: Andrew Cooper 

This also allows the usual cases to be simplified, by omitting unnecessary
buf parameters, and because the macros can appropriately size the object.

Signed-off-by: Andrew Cooper 
Signed-off-by: Jennifer Herbert 
--
CC: Paul Durrant 
CC: Andrew Cooper 
CC: Jan Beulich 
CC: Julien Grall 
---
Rebased.
---
 xen/arch/x86/hvm/dm.c | 43 ++-
 1 file changed, 26 insertions(+), 17 deletions(-)

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 89186d2..b31c252 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -32,38 +32,47 @@ struct dmop_args {
 struct xen_dm_op_buf buf[2];
 };
 
-static bool copy_buf_from_guest(const xen_dm_op_buf_t bufs[],
-unsigned int nr_bufs, void *dst,
-unsigned int idx, size_t dst_size)
+static bool _raw_copy_from_guest_buf(void *dst,
+ const struct dmop_args *args,
+ unsigned int buf_idx,
+ size_t dst_bytes)
 {
 size_t buf_bytes;
 
-if ( idx >= nr_bufs )
+if ( buf_idx >= args->nr_bufs )
 return false;
 
-buf_bytes = bufs[idx].size;
-if ( dst_size > buf_bytes )
+buf_bytes =  args->buf[buf_idx].size;
+
+if ( dst_bytes > buf_bytes )
 return false;
 
-return !copy_from_guest(dst, bufs[idx].h, dst_size);
+return !copy_from_guest(dst, args->buf[buf_idx].h, dst_bytes);
 }
 
-static bool copy_buf_to_guest(const xen_dm_op_buf_t bufs[],
-  unsigned int nr_bufs, unsigned int idx,
-  const void *src, size_t src_size)
+static bool _raw_copy_to_guest_buf(const struct dmop_args *args,
+   unsigned int buf_idx,
+   const void *src, size_t src_bytes)
 {
 size_t buf_bytes;
 
-if ( idx >= nr_bufs )
+if ( buf_idx >= args->nr_bufs )
 return false;
 
-buf_bytes = bufs[idx].size;
-if ( src_size > buf_bytes )
+buf_bytes = args->buf[buf_idx].size;
+
+if ( src_bytes > buf_bytes )
 return false;
 
-return !copy_to_guest(bufs[idx].h, src, src_size);
+return !copy_to_guest(args->buf[buf_idx].h, src, src_bytes);
 }
 
+#define COPY_FROM_GUEST_BUF(dst, args, buf_idx) \
+_raw_copy_from_guest_buf(&dst, args, buf_idx, sizeof(dst))
+
+#define COPY_TO_GUEST_BUF(args, buf_idx, src) \
+_raw_copy_to_guest_buf(args, buf_idx, &src, sizeof(src))
+
 static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn,
 unsigned int nr, const struct xen_dm_op_buf *buf)
 {
@@ -314,7 +323,7 @@ static int dm_op(const struct dmop_args *op_args)
 if ( rc )
 goto out;
 
-if ( !copy_buf_from_guest(&op_args->buf[0], op_args->nr_bufs, &op, 0, sizeof(op)) )
+if ( !COPY_FROM_GUEST_BUF(op, op_args, 0) )
 {
 rc = -EFAULT;
 goto out;
@@ -570,8 +579,8 @@ static int dm_op(const struct dmop_args *op_args)
 }
 
 if ( (!rc || rc == -ERESTART) &&
- !const_op &&
- !copy_buf_to_guest(&op_args->buf[0], op_args->nr_bufs, 0, &op, sizeof(op)) )
+ !const_op && !COPY_TO_GUEST_BUF(op_args, 0, op) )
+
 rc = -EFAULT;
 
  out:
-- 
2.1.4

