On Tue, Sep 01, 2015 at 01:04:11AM -0600, Jan Beulich wrote:
> >>> On 31.08.15 at 18:14, <konrad.w...@oracle.com> wrote:
> >> >> > --- a/xen/include/public/sysctl.h
> >> >> > +++ b/xen/include/public/sysctl.h
> >> >> > @@ -737,6 +737,12 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
> >> >> >  #define XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE       32
> >> >> >  #define XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE     33
> >> >> >  
> >> >> > +struct tmem_oid {
> >> >> > +    uint64_t oid[3];
> >> >> > +};
> >> >> > +typedef struct tmem_oid tmem_oid_t;
> >> >> > +DEFINE_XEN_GUEST_HANDLE(tmem_oid_t);
> >> >> 
> >> I know this is going to be a boring mechanical thing, but I'd really
> >> like to see this be xen_tmem_oid (and the like), especially since
> >> you intend to also use the type for the non-tools part of the
> >> interface.
> >> >
> >> >This throws a wrench in the compat autogeneration tool.
> >> >
> >> >I keep on getting:
> >> >
> >> >Fields of 'compat_xen_tmem_oid' not found in 'compat/tmem.h'
> >> >and it fails to generate the compat/.xlat/tmem.h file. Sticking a prefix
> >> >of 'xen' in 'common/compat/tmem_xen.c' or just leaving it as is did not
> >> >help:
> >> 
> >> Did you perhaps forget to adjust include/xlat.lst? I'm certain adding a
> >> prefix won't
> >
> > Yes. And ran 'make distclean' before compiling.
> >
> >> break everything, albeit iirc there's not going to be a
> >> compat_xen_tmem_oid, but
> >> the xen_ prefix in such cases gets replaced by compat_.
> > 
> > Which is part of the problem - in 'xen_tmem_oid' the 'xen' part got
> > replaced. So 'compat_tmem_oid' gets generated, but the tool is trying to
> > find 'compat_xen_tmem_oid' (at least that is what the error says) and
> > can't find it.
> 
> Perhaps that's because the structure definition is in public/sysctl.h
> (note how the error message refers to public/tmem.h)? Since you
> move it in a later patch, could you put it into public/tmem.h right
> away?

There is no need for the compat layer with the first patch
("tmem: Make the uint64_t oid[3] a proper structure: xen_tmem_oid"), which
alters 'xen_sysctl_tmem_op':

+struct xen_tmem_oid {
+    uint64_t oid[3];
+};
+typedef struct xen_tmem_oid xen_tmem_oid_t;
+DEFINE_XEN_GUEST_HANDLE(xen_tmem_oid_t);
+
 struct xen_sysctl_tmem_op {
     uint32_t cmd;       /* IN: XEN_SYSCTL_TMEM_OP_* . */
     int32_t pool_id;    /* IN: 0 by default unless _SAVE_*, RESTORE_* .*/
@@ -746,7 +752,7 @@ struct xen_sysctl_tmem_op {
     uint32_t arg1;      /* IN: If not applicable to command use 0. */
     uint32_t arg2;      /* IN: If not applicable to command use 0. */
     uint32_t pad;       /* Padding so structure is the same under 32 and 64. */
-    uint64_t oid[3];    /* IN: If not applicable to command use 0s. */
+    xen_tmem_oid_t oid; /* IN: If not applicable to command use 0s. */
     XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: Buffer to save and restore ops. */
 };
 typedef struct xen_sysctl_tmem_op xen_sysctl_tmem_op_t;

The compat layer is only needed once I move 'xen_tmem_oid_t' to the tmem.h
header file - and that is where the addition to xlat.lst comes in:

index 8cedee7..ace1d53 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -103,6 +103,7 @@
 !      sched_poll                      sched.h
 ?      sched_remote_shutdown           sched.h
 ?      sched_shutdown                  sched.h
+?      xen_tmem_oid                    tmem.h
 !      tmem_op                         tmem.h
 ?      t_buf                           trace.h
 ?      vcpu_get_physid                 vcpu.h

... and that is where the compilation fails.

I think I am confusing matters by responding on the thread that refers to
the introduction of 'struct xen_tmem_oid' in sysctl (which works fine, btw),
while I am actually tripping over problems with the next patch
('tmem: Use 'struct xen_tmem_oid' for every user'),
which moves 'struct xen_tmem_oid' into tmem.h so that it can be used by
both hypercalls (tmem.h and sysctl.h).
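
As a quick sanity check (not part of either patch; the structures below are
re-declared locally just so this compiles standalone): the wrapper struct has
the same size and alignment as the bare uint64_t[3] it replaces, so the
enclosing structures keep their layout on both 32-bit and 64-bit builds,
which is consistent with the pahole diff in the second patch:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Local copy of the public structure, for a standalone compile only. */
struct xen_tmem_oid {
    uint64_t oid[3];
};

/* Field layout before and after the conversion. */
struct before { uint32_t pad; uint64_t oid[3]; };
struct after  { uint32_t pad; struct xen_tmem_oid oid; };

/* The wrapper must not introduce padding or move the field. */
_Static_assert(sizeof(struct xen_tmem_oid) == 3 * sizeof(uint64_t),
               "wrapper adds no padding");
_Static_assert(sizeof(struct before) == sizeof(struct after),
               "enclosing structure keeps its size");
_Static_assert(offsetof(struct before, oid) == offsetof(struct after, oid),
               "oid stays at the same offset");

int main(void)
{
    printf("sizeof(struct xen_tmem_oid) = %zu\n",
           sizeof(struct xen_tmem_oid));
    return 0;
}

Building that with both -m32 and -m64 should pass the assertions on x86.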

Attaching both patches.
> 
> Jan
> 
From 6a94354fbacf92d9c4ea3aeffcf0d36ba49a1e17 Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>
Date: Mon, 31 Aug 2015 10:57:27 -0400
Subject: [PATCH 1/2] tmem: Make the uint64_t oid[3] a proper structure:
 xen_tmem_oid

And use it almost everywhere. It is easy to use it for the
sysctl since the hypervisor and toolstack are intertwined.

But for the tmem hypercall we need to be diligent (as it
is guest facing), so that conversion is delayed to another patch,
"tmem: Use 'struct xen_tmem_oid' for every user", to help
with bisection.

We also re-align some function parameters so that they sit
in the right location.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>
---
 tools/libxc/include/xenctrl.h |  6 +----
 tools/libxc/xc_tmem.c         | 16 ++++++------
 xen/common/tmem.c             | 57 +++++++++++++++++++++++--------------------
 xen/include/public/sysctl.h   |  8 +++++-
 4 files changed, 46 insertions(+), 41 deletions(-)

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 9e34769..2000f12 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -2295,13 +2295,9 @@ int xc_disable_turbo(xc_interface *xch, int cpuid);
  * tmem operations
  */
 
-struct tmem_oid {
-    uint64_t oid[3];
-};
-
 int xc_tmem_control_oid(xc_interface *xch, int32_t pool_id, uint32_t subop,
                         uint32_t cli_id, uint32_t arg1, uint32_t arg2,
-                        struct tmem_oid oid, void *buf);
+                        struct xen_tmem_oid oid, void *buf);
 int xc_tmem_control(xc_interface *xch,
                     int32_t pool_id, uint32_t subop, uint32_t cli_id,
                     uint32_t arg1, uint32_t arg2, void *buf);
diff --git a/tools/libxc/xc_tmem.c b/tools/libxc/xc_tmem.c
index 505595b..558d2ea 100644
--- a/tools/libxc/xc_tmem.c
+++ b/tools/libxc/xc_tmem.c
@@ -64,9 +64,9 @@ int xc_tmem_control(xc_interface *xch,
     sysctl.u.tmem_op.arg1 = arg1;
     sysctl.u.tmem_op.arg2 = arg2;
     sysctl.u.tmem_op.pad = 0;
-    sysctl.u.tmem_op.oid[0] = 0;
-    sysctl.u.tmem_op.oid[1] = 0;
-    sysctl.u.tmem_op.oid[2] = 0;
+    sysctl.u.tmem_op.oid.oid[0] = 0;
+    sysctl.u.tmem_op.oid.oid[1] = 0;
+    sysctl.u.tmem_op.oid.oid[2] = 0;
 
     if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 )
     {
@@ -98,7 +98,7 @@ int xc_tmem_control_oid(xc_interface *xch,
                         uint32_t cli_id,
                         uint32_t arg1,
                         uint32_t arg2,
-                        struct tmem_oid oid,
+                        struct xen_tmem_oid oid,
                         void *buf)
 {
     DECLARE_SYSCTL;
@@ -112,9 +112,7 @@ int xc_tmem_control_oid(xc_interface *xch,
     sysctl.u.tmem_op.arg1 = arg1;
     sysctl.u.tmem_op.arg2 = arg2;
     sysctl.u.tmem_op.pad = 0;
-    sysctl.u.tmem_op.oid[0] = oid.oid[0];
-    sysctl.u.tmem_op.oid[1] = oid.oid[1];
-    sysctl.u.tmem_op.oid[2] = oid.oid[2];
+    sysctl.u.tmem_op.oid = oid;
 
     if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 )
     {
@@ -453,7 +451,7 @@ int xc_tmem_restore(xc_interface *xch, int dom, int io_fd)
         }
         for ( j = n_pages; j > 0; j-- )
         {
-            struct tmem_oid oid;
+            struct xen_tmem_oid oid;
             uint32_t index;
             int rc;
             if ( read_exact(io_fd, &oid, sizeof(oid)) )
@@ -487,7 +485,7 @@ int xc_tmem_restore(xc_interface *xch, int dom, int io_fd)
 int xc_tmem_restore_extra(xc_interface *xch, int dom, int io_fd)
 {
     uint32_t pool_id;
-    struct tmem_oid oid;
+    struct xen_tmem_oid oid;
     uint32_t index;
     int count = 0;
     int checksum = 0;
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index c73add5..eea3979 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -125,12 +125,8 @@ struct tmem_pool {
 #define is_persistent(_p)  (_p->persistent)
 #define is_shared(_p)      (_p->shared)
 
-struct oid {
-    uint64_t oid[3];
-};
-
 struct tmem_object_root {
-    struct oid oid;
+    struct xen_tmem_oid oid;
     struct rb_node rb_tree_node; /* protected by pool->pool_rwlock */
     unsigned long objnode_count; /* atomicity depends on obj_spinlock */
     long pgp_count; /* atomicity depends on obj_spinlock */
@@ -158,7 +154,7 @@ struct tmem_page_descriptor {
             };
             struct tmem_object_root *obj;
         } us;
-        struct oid inv_oid;  /* used for invalid list only */
+        struct xen_tmem_oid inv_oid;  /* used for invalid list only */
     };
     pagesize_t size; /* 0 == PAGE_SIZE (pfp), -1 == data invalid,
                     else compressed data (cdata) */
@@ -815,7 +811,8 @@ static void rtn_free(struct radix_tree_node *rtn, void *arg)
 
 /************ POOL OBJECT COLLECTION MANIPULATION ROUTINES *******************/
 
-static int oid_compare(struct oid *left, struct oid *right)
+static int oid_compare(struct xen_tmem_oid *left,
+                       struct xen_tmem_oid *right)
 {
     if ( left->oid[2] == right->oid[2] )
     {
@@ -839,19 +836,20 @@ static int oid_compare(struct oid *left, struct oid *right)
         return 1;
 }
 
-static void oid_set_invalid(struct oid *oidp)
+static void oid_set_invalid(struct xen_tmem_oid *oidp)
 {
     oidp->oid[0] = oidp->oid[1] = oidp->oid[2] = -1UL;
 }
 
-static unsigned oid_hash(struct oid *oidp)
+static unsigned oid_hash(struct xen_tmem_oid *oidp)
 {
     return (tmem_hash(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
                      BITS_PER_LONG) & OBJ_HASH_BUCKETS_MASK);
 }
 
 /* searches for object==oid in pool, returns locked object if found */
-static struct tmem_object_root * obj_find(struct tmem_pool *pool, struct oid *oidp)
+static struct tmem_object_root * obj_find(struct tmem_pool *pool,
+                                          struct xen_tmem_oid *oidp)
 {
     struct rb_node *node;
     struct tmem_object_root *obj;
@@ -887,7 +885,7 @@ restart_find:
 static void obj_free(struct tmem_object_root *obj)
 {
     struct tmem_pool *pool;
-    struct oid old_oid;
+    struct xen_tmem_oid old_oid;
 
     ASSERT_SPINLOCK(&obj->obj_spinlock);
     ASSERT(obj != NULL);
@@ -946,7 +944,8 @@ static int obj_rb_insert(struct rb_root *root, struct tmem_object_root *obj)
  * allocate, initialize, and insert an tmem_object_root
  * (should be called only if find failed)
  */
-static struct tmem_object_root * obj_alloc(struct tmem_pool *pool, struct oid *oidp)
+static struct tmem_object_root * obj_alloc(struct tmem_pool *pool,
+                                           struct xen_tmem_oid *oidp)
 {
     struct tmem_object_root *obj;
 
@@ -1531,8 +1530,8 @@ cleanup:
 }
 
 static int do_tmem_put(struct tmem_pool *pool,
-              struct oid *oidp, uint32_t index,
-              xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
+                       struct xen_tmem_oid *oidp, uint32_t index,
+                       xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
 {
     struct tmem_object_root *obj = NULL;
     struct tmem_page_descriptor *pgp = NULL;
@@ -1696,8 +1695,9 @@ unlock_obj:
     return ret;
 }
 
-static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index,
-              xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
+static int do_tmem_get(struct tmem_pool *pool,
+                       struct xen_tmem_oid *oidp, uint32_t index,
+                       xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
 {
     struct tmem_object_root *obj;
     struct tmem_page_descriptor *pgp;
@@ -1774,7 +1774,8 @@ bad_copy:
     return rc;
 }
 
-static int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t index)
+static int do_tmem_flush_page(struct tmem_pool *pool,
+                              struct xen_tmem_oid *oidp, uint32_t index)
 {
     struct tmem_object_root *obj;
     struct tmem_page_descriptor *pgp;
@@ -1807,7 +1808,8 @@ out:
         return 1;
 }
 
-static int do_tmem_flush_object(struct tmem_pool *pool, struct oid *oidp)
+static int do_tmem_flush_object(struct tmem_pool *pool,
+                                struct xen_tmem_oid *oidp)
 {
     struct tmem_object_root *obj;
 
@@ -2432,7 +2434,7 @@ static int tmemc_save_get_next_page(int cli_id, uint32_t pool_id,
     struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
                    ? NULL : client->pools[pool_id];
     struct tmem_page_descriptor *pgp;
-    struct oid oid;
+    struct xen_tmem_oid oid;
     int ret = 0;
     struct tmem_handle h;
 
@@ -2525,8 +2527,10 @@ out:
     return ret;
 }
 
-static int tmemc_restore_put_page(int cli_id, uint32_t pool_id, struct oid *oidp,
-                      uint32_t index, tmem_cli_va_param_t buf, uint32_t bufsize)
+static int tmemc_restore_put_page(int cli_id, uint32_t pool_id,
+                                  struct xen_tmem_oid *oidp,
+                                  uint32_t index, tmem_cli_va_param_t buf,
+                                  uint32_t bufsize)
 {
     struct client *client = tmem_client_from_cli_id(cli_id);
     struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
@@ -2542,8 +2546,9 @@ static int tmemc_restore_put_page(int cli_id, uint32_t pool_id, struct oid *oidp
     return do_tmem_put(pool, oidp, index, 0, buf);
 }
 
-static int tmemc_restore_flush_page(int cli_id, uint32_t pool_id, struct oid *oidp,
-                        uint32_t index)
+static int tmemc_restore_flush_page(int cli_id, uint32_t pool_id,
+                                    struct xen_tmem_oid *oidp,
+                                    uint32_t index)
 {
     struct client *client = tmem_client_from_cli_id(cli_id);
     struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
@@ -2559,7 +2564,7 @@ int tmem_control(struct xen_sysctl_tmem_op *op)
     int ret;
     uint32_t pool_id = op->pool_id;
     uint32_t cmd = op->cmd;
-    struct oid *oidp = (struct oid *)(&op->oid[0]);
+    struct xen_tmem_oid *oidp = &op->oid;
 
     if ( op->pad != 0 )
         return -EINVAL;
@@ -2633,7 +2638,7 @@ long do_tmem_op(tmem_cli_op_t uops)
     struct tmem_op op;
     struct client *client = current->domain->tmem_client;
     struct tmem_pool *pool = NULL;
-    struct oid *oidp;
+    struct xen_tmem_oid *oidp;
     int rc = 0;
     bool_t succ_get = 0, succ_put = 0;
     bool_t non_succ_get = 0, non_succ_put = 0;
@@ -2714,7 +2719,7 @@ long do_tmem_op(tmem_cli_op_t uops)
             write_unlock(&tmem_rwlock);
             read_lock(&tmem_rwlock);
 
-            oidp = (struct oid *)&op.u.gen.oid[0];
+            oidp = (struct xen_tmem_oid *)&op.u.gen.oid[0];
             switch ( op.cmd )
             {
             case TMEM_NEW_POOL:
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index fc4709a..2d7580b 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -737,6 +737,12 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
 #define XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE       32
 #define XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE     33
 
+struct xen_tmem_oid {
+    uint64_t oid[3];
+};
+typedef struct xen_tmem_oid xen_tmem_oid_t;
+DEFINE_XEN_GUEST_HANDLE(xen_tmem_oid_t);
+
 struct xen_sysctl_tmem_op {
     uint32_t cmd;       /* IN: XEN_SYSCTL_TMEM_OP_* . */
     int32_t pool_id;    /* IN: 0 by default unless _SAVE_*, RESTORE_* .*/
@@ -746,7 +752,7 @@ struct xen_sysctl_tmem_op {
     uint32_t arg1;      /* IN: If not applicable to command use 0. */
     uint32_t arg2;      /* IN: If not applicable to command use 0. */
     uint32_t pad;       /* Padding so structure is the same under 32 and 64. */
-    uint64_t oid[3];    /* IN: If not applicable to command use 0s. */
+    xen_tmem_oid_t oid; /* IN: If not applicable to command use 0s. */
     XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: Buffer to save and restore ops. */
 };
 typedef struct xen_sysctl_tmem_op xen_sysctl_tmem_op_t;
-- 
2.1.0

From 54601d2a8e3856621eb107694e7986fb2abcafad Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>
Date: Mon, 31 Aug 2015 11:00:29 -0400
Subject: [PATCH 2/2] tmem: Use 'struct xen_tmem_oid' for every user.

Patch "tmem: Make the uint64_t oid[3] a proper structure:
xen_tmem_oid" converted the sysctl API to use an
proper structure. But it did not do it for the tmem hypercall.

This expands that and converts the tmem hypercall. For this
to work we define the struct in tmem.h and include it in
sysctl.h.

This change also includes the work needed to make the compat layer
happy: struct xen_tmem_oid is declared to be
checked in xlat.lst, which constructs a typedef
in the compat file with the same type, hence allowing
the 'oid' member to be copied without type issues.

The layout (and size) of this structure in memory for
'struct tmem_op' (which is guest facing) is the same! Verified
via pahole and with 32/64-bit guests.

--- /tmp/old    2015-08-27 16:34:00.535638730 -0400
+++ /tmp/new    2015-08-27 16:34:10.447705328 -0400
@@ -8,7 +8,7 @@
                        uint32_t   arg1;                 /*    28     4 */
                } creat;                                 /*          24 */
                struct {
-                       uint64_t   oid[3];               /*     8    24 */
+                       xen_tmem_oid_t oid;              /*     8    24 */
                        uint32_t   index;                /*    32     4 */
                        uint32_t   tmem_offset;          /*    36     4 */
                        uint32_t   pfn_offset;           /*    40     4 */

Signed-off-by: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>
---
 xen/common/compat/tmem_xen.c | 6 +++---
 xen/include/public/sysctl.h  | 7 +------
 xen/include/public/tmem.h    | 9 +++++++++
 xen/include/xlat.lst         | 1 +
 4 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/xen/common/compat/tmem_xen.c b/xen/common/compat/tmem_xen.c
index 97c7ff2..6bd1ad4 100644
--- a/xen/common/compat/tmem_xen.c
+++ b/xen/common/compat/tmem_xen.c
@@ -11,9 +11,9 @@
 #include <xen/hypercall.h>
 #include <compat/tmem.h>
 
-#define xen_tmem_op tmem_op
-/*CHECK_tmem_op;*/
-#undef xen_tmem_op
+#define xen_xen_tmem_oid xen_tmem_oid
+CHECK_xen_tmem_oid;
+#undef xen_xen_tmem_oid
 
 /*
  * Local variables:
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index 2d7580b..3bdf0e1 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -34,6 +34,7 @@
 #include "xen.h"
 #include "domctl.h"
 #include "physdev.h"
+#include "tmem.h"
 
 #define XEN_SYSCTL_INTERFACE_VERSION 0x0000000C
 
@@ -737,12 +738,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
 #define XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE       32
 #define XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE     33
 
-struct xen_tmem_oid {
-    uint64_t oid[3];
-};
-typedef struct xen_tmem_oid xen_tmem_oid_t;
-DEFINE_XEN_GUEST_HANDLE(xen_tmem_oid_t);
-
 struct xen_sysctl_tmem_op {
     uint32_t cmd;       /* IN: XEN_SYSCTL_TMEM_OP_* . */
     int32_t pool_id;    /* IN: 0 by default unless _SAVE_*, RESTORE_* .*/
diff --git a/xen/include/public/tmem.h b/xen/include/public/tmem.h
index e4ee704..913566a 100644
--- a/xen/include/public/tmem.h
+++ b/xen/include/public/tmem.h
@@ -73,6 +73,11 @@
 #define EFROZEN                 1000
 #define EEMPTY                  1001
 
+struct xen_tmem_oid {
+    uint64_t oid[3];
+};
+typedef struct xen_tmem_oid xen_tmem_oid_t;
+DEFINE_XEN_GUEST_HANDLE(xen_tmem_oid_t);
 
 #ifndef __ASSEMBLY__
 #if __XEN_INTERFACE_VERSION__ < 0x00040400
@@ -89,7 +94,11 @@ struct tmem_op {
             uint32_t arg1;
         } creat; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH, TMEM_RESTORE_NEW */
         struct {
+#if __XEN_INTERFACE_VERSION__ < 0x00040600
             uint64_t oid[3];
+#else
+            xen_tmem_oid_t oid;
+#endif
             uint32_t index;
             uint32_t tmem_offset;
             uint32_t pfn_offset;
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 8cedee7..ace1d53 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -103,6 +103,7 @@
 !      sched_poll                      sched.h
 ?      sched_remote_shutdown           sched.h
 ?      sched_shutdown                  sched.h
+?      xen_tmem_oid                    tmem.h
 !      tmem_op                         tmem.h
 ?      t_buf                           trace.h
 ?      vcpu_get_physid                 vcpu.h
-- 
2.1.0
