> >>> +struct xen_sysctl_tmem_op {
> >>> +    uint32_t cmd;       /* IN: XEN_SYSCTL_TMEM_OP_* . */
> >>> +    int32_t pool_id;    /* IN: 0 by default unless _SAVE_*, RESTORE_* .*/
> >>> +    uint32_t cli_id;    /* IN: client id, 0 for XEN_SYSCTL_TMEM_QUERY_FREEABLE_MB
> >>> +                           for all others can be the domain id or
> >>> +                           XEN_SYSCTL_TMEM_OP_ALL_CLIENTS for all. */
> >>> +    uint32_t arg1;      /* IN: If not applicable to command use 0. */
> >>> +    uint32_t arg2;      /* IN: If not applicable to command use 0. */
> >> Please can this interface be fixed as part of the move, even if it is in
> >> subsequent patches for clarity.
> > I will gladly fix this interface in further patches. By all means!
> 
> What I wish to avoid is 4.6 releasing with the API in this state, as
> adjusting valgrind and strace to compensate will be miserable.
> 
> The best solution would be to have this patch and the fixups adjacent in
> the series, at which point the valgrind/strace adjustments can start
> with the clean API for 4.6

See the two attached patches. The first one turns the 'oid[3]' into a proper structure.

Then I decided to see if I could expand that to also cover 'tmem_op',
which looked legitimate (it is the same size and offset, and pahole-wise
it looks right).
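
(A build-time check along these lines -- just a sketch, not part of the
attached patches -- would nail down the "same size and offset" claim;
BUILD_BUG_ON() is available from xen/lib.h:)

    BUILD_BUG_ON(sizeof(struct xen_tmem_oid) != 3 * sizeof(uint64_t));
    BUILD_BUG_ON(offsetof(struct tmem_op, u.gen.oid) != 8);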

But sadly the compat layer is not happy with me:


In file included from /home/konrad/ssd/konrad/xen/xen/include/xen/compat.h:12:0,
                 from /home/konrad/ssd/konrad/xen/xen/include/compat/xen.h:3,
                 from /home/konrad/ssd/konrad/xen/xen/include/xen/shared.h:6,
                 from /home/konrad/ssd/konrad/xen/xen/include/xen/sched.h:7,
                 from setup.c:5:
/home/konrad/ssd/konrad/xen/xen/include/xen/tmem_xen.h: In function ‘tmem_get_tmemop_from_client’:
/home/konrad/ssd/konrad/xen/xen/include/compat/xlat.h:935:26: error: incompatible types when assigning to type ‘xen_tmem_oid_t’ from type ‘compat_tmem_oid_t’
         (_d_)->u.gen.oid = (_s_)->u.gen.oid; \
                          ^
/home/konrad/ssd/konrad/xen/xen/include/xen/tmem_xen.h:306:9: note: in expansion of macro ‘XLAT_tmem_op’
         XLAT_tmem_op(op, &cop);
         ^
make[3]: *** [setup.o] Error 1
make[3]: *** Waiting for unfinished jobs....
[ ... identical error repeated for memory.c and page_alloc.c ... ]
make[3]: *** [memory.o] Error 1
make[3]: *** [page_alloc.o] Error 1
make[2]: *** [/home/konrad/ssd/konrad/xen/xen/common/built_in.o] Error 2
make[2]: *** Waiting for unfinished jobs....
make[2]: *** [/home/konrad/ssd/konrad/xen/xen/arch/x86/built_in.o] Error 2
make[1]: *** [/home/konrad/ssd/konrad/xen/xen/xen] Error 2
make: *** [build] Error 2

And the compiler is right:


struct compat_tmem_oid {
    uint64_t oid[3];
};
typedef struct compat_tmem_oid compat_tmem_oid_t;

typedef COMPAT_HANDLE(char) tmem_cli_va_compat_t;
struct compat_tmem_op {
    uint32_t cmd;
    int32_t pool_id;
    union {
        struct {
            uint64_t uuid[2];
            uint32_t flags;
            uint32_t arg1;
        } creat;
        struct {

            compat_tmem_oid_t oid;   <====== We have 'struct tmem_oid' in the tmem_op.

            uint32_t index;
            uint32_t tmem_offset;
            uint32_t pfn_offset;
            uint32_t len;
            compat_pfn_t cmfn;
        } gen;
    } u;
};
typedef struct compat_tmem_op tmem_op_compat_t;
DEFINE_COMPAT_HANDLE(tmem_op_compat_t);
#define XLAT_tmem_op(_d_, _s_) do { \
    (_d_)->cmd = (_s_)->cmd; \
    (_d_)->pool_id = (_s_)->pool_id; \
    switch (u) { \
    case XLAT_tmem_op_u_creat: \
        { \
            unsigned int i0; \
            for (i0 = 0; i0 <  2; ++i0) { \
                (_d_)->u.creat.uuid[i0] = (_s_)->u.creat.uuid[i0]; \
            } \
        } \
        (_d_)->u.creat.flags = (_s_)->u.creat.flags; \
        (_d_)->u.creat.arg1 = (_s_)->u.creat.arg1; \
        break; \
    case XLAT_tmem_op_u_gen: \
        (_d_)->u.gen.oid = (_s_)->u.gen.oid; \
        (_d_)->u.gen.index = (_s_)->u.gen.index; \
        (_d_)->u.gen.tmem_offset = (_s_)->u.gen.tmem_offset; \
        (_d_)->u.gen.pfn_offset = (_s_)->u.gen.pfn_offset; \
        (_d_)->u.gen.len = (_s_)->u.gen.len; \
        (_d_)->u.gen.cmfn = (_s_)->u.gen.cmfn; \
        break; \
    } \
} while (0)
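
For completeness, here is a sketch of the per-member copy that would
satisfy the compiler (hand-written for illustration only; the real fix
has to come out of the generated XLAT code):

        /* Copy the three 64-bit oid words one by one instead of
           struct-assigning across the compat/native type boundary. */
        {
            unsigned int i0;
            for ( i0 = 0; i0 < 3; ++i0 )
                (_d_)->u.gen.oid.oid[i0] = (_s_)->u.gen.oid.oid[i0];
        }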


static inline int tmem_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
{
#ifdef CONFIG_COMPAT
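    /* A 32-bit guest (32-bit PV, or an HVM container vCPU not running
       in 64-bit mode) needs its tmem_op translated. */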
    if ( has_hvm_container_vcpu(current) ?
         hvm_guest_x86_mode(current) != 8 :
         is_pv_32bit_vcpu(current) )
    {
        int rc;
        enum XLAT_tmem_op_u u;
        tmem_op_compat_t cop;

        rc = copy_from_guest(&cop, guest_handle_cast(uops, void), 1);
        if ( rc )
            return rc;
        switch ( cop.cmd )
        {
        case TMEM_NEW_POOL:   u = XLAT_tmem_op_u_creat; break;
        case TMEM_AUTH:       u = XLAT_tmem_op_u_creat; break;
        case TMEM_RESTORE_NEW:u = XLAT_tmem_op_u_creat; break;
        default:              u = XLAT_tmem_op_u_gen ;  break;
        }
#define XLAT_tmem_op_HNDL_u_ctrl_buf(_d_, _s_) \
        guest_from_compat_handle((_d_)->u.ctrl.buf, (_s_)->u.ctrl.buf)
        XLAT_tmem_op(op, &cop);
#undef XLAT_tmem_op_HNDL_u_ctrl_buf
        return 0;
    }
#endif
    return copy_from_guest(op, uops, 1);
}
From 93918a423a1f78af16f4b19cec30014b5448abcb Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>
Date: Fri, 28 Aug 2015 10:02:33 -0400
Subject: [PATCH 1/2] tmem: Make the uint64_t oid[3] a proper structure:
 xen_sysctl_tmem_oid

And use it almost everywhere. It is easy to use for the
sysctl since the hypervisor and toolstack are intertwined.

But for the tmem hypercall we need to be diligent.

We also re-wrap some function parameter lists so the
parameters line up correctly.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>
---
 xen/common/tmem.c           | 57 ++++++++++++++++++++++++---------------------
 xen/include/public/sysctl.h |  7 +++++-
 2 files changed, 37 insertions(+), 27 deletions(-)

diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index b62b56e..3a1345a 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -125,12 +125,8 @@ struct tmem_pool {
 #define is_persistent(_p)  (_p->persistent)
 #define is_shared(_p)      (_p->shared)
 
-struct oid {
-    uint64_t oid[3];
-};
-
 struct tmem_object_root {
-    struct oid oid;
+    struct xen_sysctl_tmem_oid oid;
     struct rb_node rb_tree_node; /* protected by pool->pool_rwlock */
     unsigned long objnode_count; /* atomicity depends on obj_spinlock */
     long pgp_count; /* atomicity depends on obj_spinlock */
@@ -158,7 +154,7 @@ struct tmem_page_descriptor {
             };
             struct tmem_object_root *obj;
         } us;
-        struct oid inv_oid;  /* used for invalid list only */
+        struct xen_sysctl_tmem_oid inv_oid;  /* used for invalid list only */
     };
     pagesize_t size; /* 0 == PAGE_SIZE (pfp), -1 == data invalid,
                     else compressed data (cdata) */
@@ -815,7 +811,8 @@ static void rtn_free(struct radix_tree_node *rtn, void *arg)
 
 /************ POOL OBJECT COLLECTION MANIPULATION ROUTINES *******************/
 
-static int oid_compare(struct oid *left, struct oid *right)
+static int oid_compare(struct xen_sysctl_tmem_oid *left,
+                       struct xen_sysctl_tmem_oid *right)
 {
     if ( left->oid[2] == right->oid[2] )
     {
@@ -839,19 +836,20 @@ static int oid_compare(struct oid *left, struct oid *right)
         return 1;
 }
 
-static void oid_set_invalid(struct oid *oidp)
+static void oid_set_invalid(struct xen_sysctl_tmem_oid *oidp)
 {
     oidp->oid[0] = oidp->oid[1] = oidp->oid[2] = -1UL;
 }
 
-static unsigned oid_hash(struct oid *oidp)
+static unsigned oid_hash(struct xen_sysctl_tmem_oid *oidp)
 {
     return (tmem_hash(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
                      BITS_PER_LONG) & OBJ_HASH_BUCKETS_MASK);
 }
 
 /* searches for object==oid in pool, returns locked object if found */
-static struct tmem_object_root * obj_find(struct tmem_pool *pool, struct oid *oidp)
+static struct tmem_object_root * obj_find(struct tmem_pool *pool,
+                                          struct xen_sysctl_tmem_oid *oidp)
 {
     struct rb_node *node;
     struct tmem_object_root *obj;
@@ -887,7 +885,7 @@ restart_find:
 static void obj_free(struct tmem_object_root *obj)
 {
     struct tmem_pool *pool;
-    struct oid old_oid;
+    struct xen_sysctl_tmem_oid old_oid;
 
     ASSERT_SPINLOCK(&obj->obj_spinlock);
     ASSERT(obj != NULL);
@@ -946,7 +944,8 @@ static int obj_rb_insert(struct rb_root *root, struct tmem_object_root *obj)
  * allocate, initialize, and insert an tmem_object_root
  * (should be called only if find failed)
  */
-static struct tmem_object_root * obj_alloc(struct tmem_pool *pool, struct oid *oidp)
+static struct tmem_object_root * obj_alloc(struct tmem_pool *pool,
+                                           struct xen_sysctl_tmem_oid *oidp)
 {
     struct tmem_object_root *obj;
 
@@ -1531,8 +1530,8 @@ cleanup:
 }
 
 static int do_tmem_put(struct tmem_pool *pool,
-              struct oid *oidp, uint32_t index,
-              xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
+                       struct xen_sysctl_tmem_oid *oidp, uint32_t index,
+                       xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
 {
     struct tmem_object_root *obj = NULL;
     struct tmem_page_descriptor *pgp = NULL;
@@ -1696,8 +1695,9 @@ unlock_obj:
     return ret;
 }
 
-static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index,
-              xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
+static int do_tmem_get(struct tmem_pool *pool,
+                       struct xen_sysctl_tmem_oid *oidp, uint32_t index,
+                       xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
 {
     struct tmem_object_root *obj;
     struct tmem_page_descriptor *pgp;
@@ -1774,7 +1774,8 @@ bad_copy:
     return rc;
 }
 
-static int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t index)
+static int do_tmem_flush_page(struct tmem_pool *pool,
+                              struct xen_sysctl_tmem_oid *oidp, uint32_t index)
 {
     struct tmem_object_root *obj;
     struct tmem_page_descriptor *pgp;
@@ -1807,7 +1808,8 @@ out:
         return 1;
 }
 
-static int do_tmem_flush_object(struct tmem_pool *pool, struct oid *oidp)
+static int do_tmem_flush_object(struct tmem_pool *pool,
+                                struct xen_sysctl_tmem_oid *oidp)
 {
     struct tmem_object_root *obj;
 
@@ -2432,7 +2434,7 @@ static int tmemc_save_get_next_page(int cli_id, uint32_t pool_id,
     struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
                    ? NULL : client->pools[pool_id];
     struct tmem_page_descriptor *pgp;
-    struct oid oid;
+    struct xen_sysctl_tmem_oid oid;
     int ret = 0;
     struct tmem_handle h;
 
@@ -2525,8 +2527,10 @@ out:
     return ret;
 }
 
-static int tmemc_restore_put_page(int cli_id, uint32_t pool_id, struct oid *oidp,
-                      uint32_t index, tmem_cli_va_param_t buf, uint32_t bufsize)
+static int tmemc_restore_put_page(int cli_id, uint32_t pool_id,
+                                  struct xen_sysctl_tmem_oid *oidp,
+                                  uint32_t index, tmem_cli_va_param_t buf,
+                                  uint32_t bufsize)
 {
     struct client *client = tmem_client_from_cli_id(cli_id);
     struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
@@ -2542,8 +2546,9 @@ static int tmemc_restore_put_page(int cli_id, uint32_t pool_id, struct oid *oidp
     return do_tmem_put(pool, oidp, index, 0, buf);
 }
 
-static int tmemc_restore_flush_page(int cli_id, uint32_t pool_id, struct oid *oidp,
-                        uint32_t index)
+static int tmemc_restore_flush_page(int cli_id, uint32_t pool_id,
+                                    struct xen_sysctl_tmem_oid *oidp,
+                                    uint32_t index)
 {
     struct client *client = tmem_client_from_cli_id(cli_id);
     struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
@@ -2559,7 +2564,7 @@ int tmem_control(struct xen_sysctl_tmem_op *op)
     int ret;
     uint32_t pool_id = op->pool_id;
     uint32_t cmd = op->cmd;
-    struct oid *oidp = (struct oid *)(&op->oid[0]);
+    struct xen_sysctl_tmem_oid *oidp = &op->oid;
 
     if ( op->pad != 0 )
         return -EINVAL;
@@ -2633,7 +2638,7 @@ long do_tmem_op(tmem_cli_op_t uops)
     struct tmem_op op;
     struct client *client = current->domain->tmem_client;
     struct tmem_pool *pool = NULL;
-    struct oid *oidp;
+    struct xen_sysctl_tmem_oid *oidp;
     int rc = 0;
     bool_t succ_get = 0, succ_put = 0;
     bool_t non_succ_get = 0, non_succ_put = 0;
@@ -2714,7 +2719,7 @@ long do_tmem_op(tmem_cli_op_t uops)
             write_unlock(&tmem_rwlock);
             read_lock(&tmem_rwlock);
 
-            oidp = (struct oid *)&op.u.gen.oid[0];
+            oidp = (struct xen_sysctl_tmem_oid *)&op.u.gen.oid[0];
             switch ( op.cmd )
             {
             case TMEM_NEW_POOL:
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index c1566e6..ee5b33f 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -737,6 +737,11 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
 #define XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE       32
 #define XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE     33
 
+struct xen_sysctl_tmem_oid {
+    uint64_t oid[3];
+};
+typedef struct xen_sysctl_tmem_oid xen_sysctl_tmem_oid_t;
+
 struct xen_sysctl_tmem_op {
     uint32_t cmd;       /* IN: XEN_SYSCTL_TMEM_OP_* . */
     int32_t pool_id;    /* IN: 0 by default unless _SAVE_*, RESTORE_* .*/
@@ -746,7 +751,7 @@ struct xen_sysctl_tmem_op {
     uint32_t arg1;      /* IN: If not applicable to command use 0. */
     uint32_t arg2;      /* IN: If not applicable to command use 0. */
     uint32_t pad;       /* Padding so structure is the same under 32 and 64. */
-    uint64_t oid[3];    /* IN: If not applicable to command use 0. */
+    xen_sysctl_tmem_oid_t oid;     /* IN: If not applicable to command use 0. */
     XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: Buffer to save and restore ops. */
 };
 typedef struct xen_sysctl_tmem_op xen_sysctl_tmem_op_t;
-- 
2.1.0

From 35c2063457b440cf20359bb04aeb0a0981aa7388 Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>
Date: Fri, 28 Aug 2015 10:03:22 -0400
Subject: [PATCH 2/2] tmem: Use 'struct xen_tmem_oid' for every user.

Patch "tmem: Make the uint64_t oid[3] a proper structure:
xen_sysctl_tmem_oid" converted the sysctl API to use an
proper structure. But it did not do it for the tmem hypercall.

This expands that and converts the tmem hypercall. For this
to work properly we change the name to 'struct xen_tmem_oid'
and use it everywhere. To deflect the #include usage of tmem.h
in sysctl.h or vice-versa we define it in both places and
use an #define to keep compiler happy.

We also had to expand the compat layer to copy each entry individually.

The in-memory layout (and size) of this structure within
'struct tmem_op' (which is guest facing) is the same! Verified
via pahole.

*TODO*: Test with 32/64 bit guests.

--- /tmp/old    2015-08-27 16:34:00.535638730 -0400
+++ /tmp/new    2015-08-27 16:34:10.447705328 -0400
@@ -8,7 +8,7 @@
                        uint32_t   arg1;                 /*    28     4 */
                } creat;                                 /*          24 */
                struct {
-                       uint64_t   oid[3];               /*     8    24 */
+                       xen_tmem_oid_t oid;              /*     8    24 */
                        uint32_t   index;                /*    32     4 */
                        uint32_t   tmem_offset;          /*    36     4 */
                        uint32_t   pfn_offset;           /*    40     4 */

Signed-off-by: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>
---
 xen/common/tmem.c           | 38 +++++++++++++++++++-------------------
 xen/include/public/sysctl.h |  9 ++++++---
 xen/include/public/tmem.h   | 13 ++++++++++++-
 3 files changed, 37 insertions(+), 23 deletions(-)

diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 3a1345a..bd228b6 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -126,7 +126,7 @@ struct tmem_pool {
 #define is_shared(_p)      (_p->shared)
 
 struct tmem_object_root {
-    struct xen_sysctl_tmem_oid oid;
+    struct xen_tmem_oid oid;
     struct rb_node rb_tree_node; /* protected by pool->pool_rwlock */
     unsigned long objnode_count; /* atomicity depends on obj_spinlock */
     long pgp_count; /* atomicity depends on obj_spinlock */
@@ -154,7 +154,7 @@ struct tmem_page_descriptor {
             };
             struct tmem_object_root *obj;
         } us;
-        struct xen_sysctl_tmem_oid inv_oid;  /* used for invalid list only */
+        struct xen_tmem_oid inv_oid;  /* used for invalid list only */
     };
     pagesize_t size; /* 0 == PAGE_SIZE (pfp), -1 == data invalid,
                     else compressed data (cdata) */
@@ -811,8 +811,8 @@ static void rtn_free(struct radix_tree_node *rtn, void *arg)
 
 /************ POOL OBJECT COLLECTION MANIPULATION ROUTINES *******************/
 
-static int oid_compare(struct xen_sysctl_tmem_oid *left,
-                       struct xen_sysctl_tmem_oid *right)
+static int oid_compare(struct xen_tmem_oid *left,
+                       struct xen_tmem_oid *right)
 {
     if ( left->oid[2] == right->oid[2] )
     {
@@ -836,12 +836,12 @@ static int oid_compare(struct xen_sysctl_tmem_oid *left,
         return 1;
 }
 
-static void oid_set_invalid(struct xen_sysctl_tmem_oid *oidp)
+static void oid_set_invalid(struct xen_tmem_oid *oidp)
 {
     oidp->oid[0] = oidp->oid[1] = oidp->oid[2] = -1UL;
 }
 
-static unsigned oid_hash(struct xen_sysctl_tmem_oid *oidp)
+static unsigned oid_hash(struct xen_tmem_oid *oidp)
 {
     return (tmem_hash(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
                      BITS_PER_LONG) & OBJ_HASH_BUCKETS_MASK);
@@ -849,7 +849,7 @@ static unsigned oid_hash(struct xen_sysctl_tmem_oid *oidp)
 
 /* searches for object==oid in pool, returns locked object if found */
 static struct tmem_object_root * obj_find(struct tmem_pool *pool,
-                                          struct xen_sysctl_tmem_oid *oidp)
+                                          struct xen_tmem_oid *oidp)
 {
     struct rb_node *node;
     struct tmem_object_root *obj;
@@ -885,7 +885,7 @@ restart_find:
 static void obj_free(struct tmem_object_root *obj)
 {
     struct tmem_pool *pool;
-    struct xen_sysctl_tmem_oid old_oid;
+    struct xen_tmem_oid old_oid;
 
     ASSERT_SPINLOCK(&obj->obj_spinlock);
     ASSERT(obj != NULL);
@@ -945,7 +945,7 @@ static int obj_rb_insert(struct rb_root *root, struct tmem_object_root *obj)
  * (should be called only if find failed)
  */
 static struct tmem_object_root * obj_alloc(struct tmem_pool *pool,
-                                           struct xen_sysctl_tmem_oid *oidp)
+                                           struct xen_tmem_oid *oidp)
 {
     struct tmem_object_root *obj;
 
@@ -1530,7 +1530,7 @@ cleanup:
 }
 
 static int do_tmem_put(struct tmem_pool *pool,
-                       struct xen_sysctl_tmem_oid *oidp, uint32_t index,
+                       struct xen_tmem_oid *oidp, uint32_t index,
                        xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
 {
     struct tmem_object_root *obj = NULL;
@@ -1696,7 +1696,7 @@ unlock_obj:
 }
 
 static int do_tmem_get(struct tmem_pool *pool,
-                       struct xen_sysctl_tmem_oid *oidp, uint32_t index,
+                       struct xen_tmem_oid *oidp, uint32_t index,
                        xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
 {
     struct tmem_object_root *obj;
@@ -1775,7 +1775,7 @@ bad_copy:
 }
 
 static int do_tmem_flush_page(struct tmem_pool *pool,
-                              struct xen_sysctl_tmem_oid *oidp, uint32_t index)
+                              struct xen_tmem_oid *oidp, uint32_t index)
 {
     struct tmem_object_root *obj;
     struct tmem_page_descriptor *pgp;
@@ -1809,7 +1809,7 @@ out:
 }
 
 static int do_tmem_flush_object(struct tmem_pool *pool,
-                                struct xen_sysctl_tmem_oid *oidp)
+                                struct xen_tmem_oid *oidp)
 {
     struct tmem_object_root *obj;
 
@@ -2434,7 +2434,7 @@ static int tmemc_save_get_next_page(int cli_id, uint32_t pool_id,
     struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
                    ? NULL : client->pools[pool_id];
     struct tmem_page_descriptor *pgp;
-    struct xen_sysctl_tmem_oid oid;
+    struct xen_tmem_oid oid;
     int ret = 0;
     struct tmem_handle h;
 
@@ -2528,7 +2528,7 @@ out:
 }
 
 static int tmemc_restore_put_page(int cli_id, uint32_t pool_id,
-                                  struct xen_sysctl_tmem_oid *oidp,
+                                  struct xen_tmem_oid *oidp,
                                   uint32_t index, tmem_cli_va_param_t buf,
                                   uint32_t bufsize)
 {
@@ -2547,7 +2547,7 @@ static int tmemc_restore_put_page(int cli_id, uint32_t pool_id,
 }
 
 static int tmemc_restore_flush_page(int cli_id, uint32_t pool_id,
-                                    struct xen_sysctl_tmem_oid *oidp,
+                                    struct xen_tmem_oid *oidp,
                                     uint32_t index)
 {
     struct client *client = tmem_client_from_cli_id(cli_id);
@@ -2564,7 +2564,7 @@ int tmem_control(struct xen_sysctl_tmem_op *op)
     int ret;
     uint32_t pool_id = op->pool_id;
     uint32_t cmd = op->cmd;
-    struct xen_sysctl_tmem_oid *oidp = &op->oid;
+    struct xen_tmem_oid *oidp = &op->oid;
 
     if ( op->pad != 0 )
         return -EINVAL;
@@ -2638,7 +2638,7 @@ long do_tmem_op(tmem_cli_op_t uops)
     struct tmem_op op;
     struct client *client = current->domain->tmem_client;
     struct tmem_pool *pool = NULL;
-    struct xen_sysctl_tmem_oid *oidp;
+    struct xen_tmem_oid *oidp;
     int rc = 0;
     bool_t succ_get = 0, succ_put = 0;
     bool_t non_succ_get = 0, non_succ_put = 0;
@@ -2719,7 +2719,7 @@ long do_tmem_op(tmem_cli_op_t uops)
             write_unlock(&tmem_rwlock);
             read_lock(&tmem_rwlock);
 
-            oidp = (struct xen_sysctl_tmem_oid *)&op.u.gen.oid[0];
+            oidp = &op.u.gen.oid;
             switch ( op.cmd )
             {
             case TMEM_NEW_POOL:
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index ee5b33f..e0bdda0 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -737,10 +737,13 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
 #define XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE       32
 #define XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE     33
 
-struct xen_sysctl_tmem_oid {
+#ifndef TMEM_OID_DEFINED
+#define TMEM_OID_DEFINED
+struct xen_tmem_oid {
     uint64_t oid[3];
 };
-typedef struct xen_sysctl_tmem_oid xen_sysctl_tmem_oid_t;
+typedef struct xen_tmem_oid xen_tmem_oid_t;
+#endif
 
 struct xen_sysctl_tmem_op {
     uint32_t cmd;       /* IN: XEN_SYSCTL_TMEM_OP_* . */
@@ -751,7 +754,7 @@ struct xen_sysctl_tmem_op {
     uint32_t arg1;      /* IN: If not applicable to command use 0. */
     uint32_t arg2;      /* IN: If not applicable to command use 0. */
     uint32_t pad;       /* Padding so structure is the same under 32 and 64. */
-    xen_sysctl_tmem_oid_t oid;     /* IN: If not applicable to command use 0. */
+    xen_tmem_oid_t oid; /* IN: If not applicable to command use 0. */
     XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: Buffer to save and restore ops. */
 };
 typedef struct xen_sysctl_tmem_op xen_sysctl_tmem_op_t;
diff --git a/xen/include/public/tmem.h b/xen/include/public/tmem.h
index e4ee704..24ffa58 100644
--- a/xen/include/public/tmem.h
+++ b/xen/include/public/tmem.h
@@ -73,6 +73,13 @@
 #define EFROZEN                 1000
 #define EEMPTY                  1001
 
+#ifndef TMEM_OID_DEFINED
+#define TMEM_OID_DEFINED
+struct xen_tmem_oid {
+    uint64_t oid[3];
+};
+typedef struct xen_tmem_oid xen_tmem_oid_t;
+#endif
 
 #ifndef __ASSEMBLY__
 #if __XEN_INTERFACE_VERSION__ < 0x00040400
@@ -89,7 +96,11 @@ struct tmem_op {
             uint32_t arg1;
         } creat; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH, TMEM_RESTORE_NEW */
         struct {
+#if __XEN_INTERFACE_VERSION__ < 0x00040600
             uint64_t oid[3];
+#else
+            xen_tmem_oid_t oid;
+#endif
             uint32_t index;
             uint32_t tmem_offset;
             uint32_t pfn_offset;
@@ -104,7 +115,7 @@ DEFINE_XEN_GUEST_HANDLE(tmem_op_t);
 struct tmem_handle {
     uint32_t pool_id;
     uint32_t index;
-    uint64_t oid[3];
+    xen_tmem_oid_t oid;
 };
 #endif
 
-- 
2.1.0
