Hey,

I was wondering whether there is interest in exposing these per-domain
runstate times (runnable, blocked, offline) via xentop?

I wrote a hack patch (see attached) that exposes this, which helped
me figure out what guests are doing when their CPU consumption
time is low - i.e. whether they are truly 'halted' or
whether they are preempted by the hypervisor or other guests
(because I may have pinned _ALL_ guest VCPUs on the same pCPU).

Of course I am not proposing the patches as they are.

>From ee7abce84ea4e4fbaad9c03793d9b75e07e15ca3 Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>
Date: Fri, 4 Apr 2014 15:24:22 -0400
Subject: [PATCH 1/2] xen/xentop: Include RUNNABLE, OFFLINE, and BLOCKED stats

Signed-off-by: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>
---
 tools/libxc/xc_domain.c                     |  26 ++++++
 tools/libxc/xenctrl.h                       |   6 ++
 tools/xenstat/libxenstat/src/xenstat.c      |  24 +++++-
 tools/xenstat/libxenstat/src/xenstat.h      |   3 +
 tools/xenstat/libxenstat/src/xenstat_priv.h |   3 +
 tools/xenstat/xentop/xentop.c               | 129 ++++++++++++++++++++++------
 xen/common/domctl.c                         |  63 ++++++++++++++
 xen/common/sysctl.c                         |  43 ++++++++++
 xen/include/public/domctl.h                 |  47 ++++++++++
 xen/include/public/sysctl.h                 |  13 +++
 xen/include/xen/domain.h                    |   2 +
 11 files changed, 332 insertions(+), 27 deletions(-)

diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 369c3f3..028eb4f 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -281,6 +281,32 @@ int xc_vcpu_getaffinity(xc_interface *xch,
 out:
     return ret;
 }
+/*
+ * Retrieve extended (runstate-aware) info for up to @max_domains domains,
+ * starting at @first_domain.  Returns the number of entries written to
+ * @info, or -1 on error.
+ */
+int xc_domain_getinfolist2(xc_interface *xch,
+                          uint32_t first_domain,
+                          unsigned int max_domains,
+                          xc_domaininfo2_t *info)
+{
+    int ret = 0;
+    DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BOUNCE(info, max_domains*sizeof(*info),
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( xc_hypercall_bounce_pre(xch, info) )
+        return -1;
+
+    sysctl.cmd = XEN_SYSCTL_getdomaininfolist2;
+    /* Must use the getdomaininfolist2 union member throughout: the original
+     * hack filled u.getdomaininfolist and only worked by layout accident. */
+    sysctl.u.getdomaininfolist2.first_domain = first_domain;
+    sysctl.u.getdomaininfolist2.max_domains  = max_domains;
+    set_xen_guest_handle(sysctl.u.getdomaininfolist2.buffer, info);
+
+    if ( xc_sysctl(xch, &sysctl) < 0 )
+        ret = -1;
+    else
+        ret = sysctl.u.getdomaininfolist2.num_domains;
+
+    xc_hypercall_bounce_post(xch, info);
+
+    return ret;
+}
 
 int xc_domain_get_guest_width(xc_interface *xch, uint32_t domid,
                               unsigned int *guest_width)
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 42d3133..2100e8f 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -420,6 +420,7 @@ typedef struct xc_dominfo {
 } xc_dominfo_t;
 
 typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
+typedef xen_domctl_getdomaininfo2_t xc_domaininfo2_t;
 
 typedef union 
 {
@@ -625,6 +626,11 @@ int xc_domain_getinfo(xc_interface *xch,
                       xc_dominfo_t *info);
 
 
+int xc_domain_getinfolist2(xc_interface *xch,
+                          uint32_t first_domain,
+                          unsigned int max_domains,
+                          xc_domaininfo2_t *info);
+
 /**
  * This function will set the execution context for the specified vcpu.
  *
diff --git a/tools/xenstat/libxenstat/src/xenstat.c 
b/tools/xenstat/libxenstat/src/xenstat.c
index e5facb8..936cb02 100644
--- a/tools/xenstat/libxenstat/src/xenstat.c
+++ b/tools/xenstat/libxenstat/src/xenstat.c
@@ -163,7 +163,7 @@ xenstat_node *xenstat_get_node(xenstat_handle * handle, 
unsigned int flags)
 #define DOMAIN_CHUNK_SIZE 256
        xenstat_node *node;
        xc_physinfo_t physinfo = { 0 };
-       xc_domaininfo_t domaininfo[DOMAIN_CHUNK_SIZE];
+       xc_domaininfo2_t domaininfo[DOMAIN_CHUNK_SIZE];
        unsigned int new_domains;
        unsigned int i;
 
@@ -204,7 +204,7 @@ xenstat_node *xenstat_get_node(xenstat_handle * handle, 
unsigned int flags)
        do {
                xenstat_domain *domain, *tmp;
 
-               new_domains = xc_domain_getinfolist(handle->xc_handle,
+               new_domains = xc_domain_getinfolist2(handle->xc_handle,
                                                    node->num_domains, 
                                                    DOMAIN_CHUNK_SIZE, 
                                                    domaininfo);
@@ -244,6 +244,9 @@ xenstat_node *xenstat_get_node(xenstat_handle * handle, 
unsigned int flags)
                        }
                        domain->state = domaininfo[i].flags;
                        domain->cpu_ns = domaininfo[i].cpu_time;
+                       domain->cpu_blocked_ns = domaininfo[i].cpu_time_blocked;
+                       domain->cpu_offline_ns = domaininfo[i].cpu_time_offline;
+                       domain->cpu_runnable_ns = 
domaininfo[i].cpu_time_runnable;
                        domain->num_vcpus = (domaininfo[i].max_vcpu_id+1);
                        domain->vcpus = NULL;
                        domain->cur_mem =
@@ -379,6 +382,18 @@ unsigned long long xenstat_domain_cpu_ns(xenstat_domain * 
domain)
 {
        return domain->cpu_ns;
 }
+/* Get time (ns) the domain's vcpus spent in RUNSTATE_offline */
+unsigned long long xenstat_domain_cpu_offline_ns(xenstat_domain * domain)
+{
+       return domain->cpu_offline_ns;
+}
+/* Get time (ns) the domain's vcpus spent runnable but not running */
+unsigned long long xenstat_domain_cpu_runnable_ns(xenstat_domain * domain)
+{
+       return domain->cpu_runnable_ns;
+}
+/* Get time (ns) the domain's vcpus spent in RUNSTATE_blocked */
+unsigned long long xenstat_domain_cpu_blocked_ns(xenstat_domain * domain)
+{
+       return domain->cpu_blocked_ns;
+}
 
 /* Find the number of VCPUs for a domain */
 unsigned int xenstat_domain_num_vcpus(xenstat_domain * domain)
@@ -546,6 +561,11 @@ unsigned long long xenstat_vcpu_ns(xenstat_vcpu * vcpu)
 {
        return vcpu->ns;
 }
+/* Get VCPU offline time.
+ * FIXME(review): copy/paste of xenstat_vcpu_ns() -- it returns the same
+ * vcpu->ns (running time); xenstat_vcpu carries no offline counter, so
+ * either add one or drop this accessor. */
+unsigned long long xenstat_vcpu_offline_ns(xenstat_vcpu * vcpu)
+{
+       return vcpu->ns;
+}
 
 /*
  * Network functions
diff --git a/tools/xenstat/libxenstat/src/xenstat.h 
b/tools/xenstat/libxenstat/src/xenstat.h
index 47ec60e..a7c70ab 100644
--- a/tools/xenstat/libxenstat/src/xenstat.h
+++ b/tools/xenstat/libxenstat/src/xenstat.h
@@ -94,6 +94,9 @@ char *xenstat_domain_name(xenstat_domain * domain);
 
 /* Get information about how much CPU time has been used */
 unsigned long long xenstat_domain_cpu_ns(xenstat_domain * domain);
+unsigned long long xenstat_domain_cpu_blocked_ns(xenstat_domain * domain);
+unsigned long long xenstat_domain_cpu_runnable_ns(xenstat_domain * domain);
+unsigned long long xenstat_domain_cpu_offline_ns(xenstat_domain * domain);
 
 /* Find the number of VCPUs allocated to a domain */
 unsigned int xenstat_domain_num_vcpus(xenstat_domain * domain);
diff --git a/tools/xenstat/libxenstat/src/xenstat_priv.h 
b/tools/xenstat/libxenstat/src/xenstat_priv.h
index 8490e23..5c257bb 100644
--- a/tools/xenstat/libxenstat/src/xenstat_priv.h
+++ b/tools/xenstat/libxenstat/src/xenstat_priv.h
@@ -64,6 +64,9 @@ struct xenstat_domain {
        char *name;
        unsigned int state;
        unsigned long long cpu_ns;
+       unsigned long long cpu_blocked_ns;
+       unsigned long long cpu_offline_ns;
+       unsigned long long cpu_runnable_ns;
        unsigned int num_vcpus;         /* No. vcpus configured for domain */
        xenstat_vcpu *vcpus;            /* Array of length num_vcpus */
        unsigned long long cur_mem;     /* Current memory reservation */
diff --git a/tools/xenstat/xentop/xentop.c b/tools/xenstat/xentop/xentop.c
index dd11927..88433e9 100644
--- a/tools/xenstat/xentop/xentop.c
+++ b/tools/xenstat/xentop/xentop.c
@@ -97,6 +97,12 @@ static int compare_cpu(xenstat_domain *domain1, 
xenstat_domain *domain2);
 static void print_cpu(xenstat_domain *domain);
 static int compare_cpu_pct(xenstat_domain *domain1, xenstat_domain *domain2);
 static void print_cpu_pct(xenstat_domain *domain);
+static int compare_cpu_offline_pct(xenstat_domain *domain1, xenstat_domain 
*domain2);
+static void print_cpu_offline_pct(xenstat_domain *domain);
+static int compare_cpu_blocked_pct(xenstat_domain *domain1, xenstat_domain 
*domain2);
+static void print_cpu_blocked_pct(xenstat_domain *domain);
+static int compare_cpu_runnable_pct(xenstat_domain *domain1, xenstat_domain 
*domain2);
+static void print_cpu_runnable_pct(xenstat_domain *domain);
 static int compare_mem(xenstat_domain *domain1, xenstat_domain *domain2);
 static void print_mem(xenstat_domain *domain);
 static void print_mem_pct(xenstat_domain *domain);
@@ -146,6 +152,9 @@ typedef enum field_id {
        FIELD_STATE,
        FIELD_CPU,
        FIELD_CPU_PCT,
+       FIELD_CPU_OFFLINE_PCT,
+       FIELD_CPU_BLOCKED_PCT,
+       FIELD_CPU_RUNNABLE_PCT,
        FIELD_MEM,
        FIELD_MEM_PCT,
        FIELD_MAXMEM,
@@ -167,30 +176,34 @@ typedef struct field {
        field_id num;
        const char *header;
        unsigned int default_width;
+       unsigned int default_on;
        int (*compare)(xenstat_domain *domain1, xenstat_domain *domain2);
        void (*print)(xenstat_domain *domain);
 } field;
 
 field fields[] = {
-       { FIELD_NAME,      "NAME",      10, compare_name,      print_name    },
-       { FIELD_STATE,     "STATE",      6, compare_state,     print_state   },
-       { FIELD_CPU,       "CPU(sec)",  10, compare_cpu,       print_cpu     },
-       { FIELD_CPU_PCT,   "CPU(%)",     6, compare_cpu_pct,   print_cpu_pct },
-       { FIELD_MEM,       "MEM(k)",    10, compare_mem,       print_mem     },
-       { FIELD_MEM_PCT,   "MEM(%)",     6, compare_mem,       print_mem_pct },
-       { FIELD_MAXMEM,    "MAXMEM(k)", 10, compare_maxmem,    print_maxmem  },
-       { FIELD_MAX_PCT,   "MAXMEM(%)",  9, compare_maxmem,    print_max_pct },
-       { FIELD_VCPUS,     "VCPUS",      5, compare_vcpus,     print_vcpus   },
-       { FIELD_NETS,      "NETS",       4, compare_nets,      print_nets    },
-       { FIELD_NET_TX,    "NETTX(k)",   8, compare_net_tx,    print_net_tx  },
-       { FIELD_NET_RX,    "NETRX(k)",   8, compare_net_rx,    print_net_rx  },
-       { FIELD_VBDS,      "VBDS",       4, compare_vbds,      print_vbds    },
-       { FIELD_VBD_OO,    "VBD_OO",     8, compare_vbd_oo,    print_vbd_oo  },
-       { FIELD_VBD_RD,    "VBD_RD",     8, compare_vbd_rd,    print_vbd_rd  },
-       { FIELD_VBD_WR,    "VBD_WR",     8, compare_vbd_wr,    print_vbd_wr  },
-       { FIELD_VBD_RSECT, "VBD_RSECT", 10, compare_vbd_rsect, print_vbd_rsect  
},
-       { FIELD_VBD_WSECT, "VBD_WSECT", 10, compare_vbd_wsect, print_vbd_wsect  
},
-       { FIELD_SSID,      "SSID",       4, compare_ssid,      print_ssid    }
+       { FIELD_NAME,      "NAME",      10, 1, compare_name,      print_name    
},
+       { FIELD_STATE,     "STATE",      6, 1, compare_state,     print_state   
},
+       { FIELD_CPU,       "CPU(sec)",  10, 1, compare_cpu,       print_cpu     
},
+       { FIELD_CPU_PCT,   "CPU(%)",     6, 1, compare_cpu_pct,   print_cpu_pct 
},
+       { FIELD_CPU_OFFLINE_PCT,   "CPU_O(%)",     6, 0, 
compare_cpu_offline_pct,   print_cpu_offline_pct },
+       { FIELD_CPU_BLOCKED_PCT,   "CPU_B(%)",     6, 0, 
compare_cpu_blocked_pct,   print_cpu_blocked_pct },
+       { FIELD_CPU_RUNNABLE_PCT,   "CPU_R(%)",     6, 0, 
compare_cpu_runnable_pct,   print_cpu_runnable_pct },
+       { FIELD_MEM,       "MEM(k)",    10, 1, compare_mem,       print_mem     
},
+       { FIELD_MEM_PCT,   "MEM(%)",     6, 1, compare_mem,       print_mem_pct 
},
+       { FIELD_MAXMEM,    "MAXMEM(k)", 10, 1, compare_maxmem,    print_maxmem  
},
+       { FIELD_MAX_PCT,   "MAXMEM(%)",  9, 1, compare_maxmem,    print_max_pct 
},
+       { FIELD_VCPUS,     "VCPUS",      5, 1, compare_vcpus,     print_vcpus   
},
+       { FIELD_NETS,      "NETS",       4, 1, compare_nets,      print_nets    
},
+       { FIELD_NET_TX,    "NETTX(k)",   8, 1, compare_net_tx,    print_net_tx  
},
+       { FIELD_NET_RX,    "NETRX(k)",   8, 1, compare_net_rx,    print_net_rx  
},
+       { FIELD_VBDS,      "VBDS",       4, 1, compare_vbds,      print_vbds    
},
+       { FIELD_VBD_OO,    "VBD_OO",     8, 1, compare_vbd_oo,    print_vbd_oo  
},
+       { FIELD_VBD_RD,    "VBD_RD",     8, 1, compare_vbd_rd,    print_vbd_rd  
},
+       { FIELD_VBD_WR,    "VBD_WR",     8, 1, compare_vbd_wr,    print_vbd_wr  
},
+       { FIELD_VBD_RSECT, "VBD_RSECT", 10, 1, compare_vbd_rsect, 
print_vbd_rsect  },
+       { FIELD_VBD_WSECT, "VBD_WSECT", 10, 1, compare_vbd_wsect, 
print_vbd_wsect  },
+       { FIELD_SSID,      "SSID",       4, 1, compare_ssid,      print_ssid    
}
 };
 
 const unsigned int NUM_FIELDS = sizeof(fields)/sizeof(field);
@@ -212,6 +225,7 @@ int show_vbds = 0;
 int show_tmem = 0;
 int repeat_header = 0;
 int show_full_name = 0;
+int show_runnable = 0;
 #define PROMPT_VAL_LEN 80
 char *prompt = NULL;
 char prompt_val[PROMPT_VAL_LEN];
@@ -238,6 +252,7 @@ static void usage(const char *program)
               "-x, --vbds           output vbd block device data\n"
               "-r, --repeat-header  repeat table header before each domain\n"
               "-v, --vcpus          output vcpu data\n"
+              "-R, --runnable       output CPU runstate percentages\n"
               "-b, --batch          output in batch mode, no user input 
accepted\n"
               "-i, --iterations     number of iterations before exiting\n"
               "-f, --full-name      output the full domain name (not 
truncated)\n"
@@ -499,11 +514,19 @@ static void print_cpu(xenstat_domain *domain)
        print("%10llu", xenstat_domain_cpu_ns(domain)/1000000000);
 }
 
+/* Selector for get_cpu_pct(): which accumulated runstate time to diff.
+ * NOTE(review): a file-scope tag named '_type' sits in the namespace
+ * reserved for the implementation (C11 7.1.3); consider renaming, and the
+ * lower-case constants (running, blocked, ...) risk identifier clashes. */
+enum _type {
+       running = 0,
+       offline,
+       blocked,
+       runnable
+};
+
 /* Computes the CPU percentage used for a specified domain */
-static double get_cpu_pct(xenstat_domain *domain)
+static double get_cpu_pct(xenstat_domain *domain, int type)
 {
        xenstat_domain *old_domain;
        double us_elapsed;
+       double prev, next;
 
        /* Can't calculate CPU percentage without a previous sample. */
        if(prev_node == NULL)
@@ -520,19 +543,67 @@ static double get_cpu_pct(xenstat_domain *domain)
        /* In the following, nanoseconds must be multiplied by 1000.0 to
         * convert to microseconds, then divided by 100.0 to get a percentage,
         * resulting in a multiplication by 10.0 */
-       return ((xenstat_domain_cpu_ns(domain)
-                -xenstat_domain_cpu_ns(old_domain))/10.0)/us_elapsed;
+       switch (type) {
+       case offline: prev = xenstat_domain_cpu_offline_ns(old_domain);
+                       next = xenstat_domain_cpu_offline_ns(domain);
+               break;
+       case blocked: prev = xenstat_domain_cpu_blocked_ns(old_domain);
+                       next = xenstat_domain_cpu_blocked_ns(domain);
+               break;
+       case runnable: prev = xenstat_domain_cpu_runnable_ns(old_domain);
+                       next = xenstat_domain_cpu_runnable_ns(domain);
+               break;
+       case running: 
+       default:
+               prev = xenstat_domain_cpu_ns(old_domain);
+                       next = xenstat_domain_cpu_ns(domain);
+               break;
+       }       
+       return ((next - prev)/10.0)/us_elapsed;
 }
 
 static int compare_cpu_pct(xenstat_domain *domain1, xenstat_domain *domain2)
 {
-       return -compare(get_cpu_pct(domain1), get_cpu_pct(domain2));
+       return -compare(get_cpu_pct(domain1, running), get_cpu_pct(domain2, 
running));
 }
 
 /* Prints cpu percentage statistic */
 static void print_cpu_pct(xenstat_domain *domain)
 {
-       print("%6.1f", get_cpu_pct(domain));
+       print("%6.1f", get_cpu_pct(domain, running));
+}
+
+static int compare_cpu_runnable_pct(xenstat_domain *domain1, xenstat_domain 
*domain2)
+{
+       return -compare(get_cpu_pct(domain1, runnable), get_cpu_pct(domain2, 
runnable));
+}
+
+/* Prints cpu percentage statistic */
+static void print_cpu_runnable_pct(xenstat_domain *domain)
+{
+       print("%6.1f", get_cpu_pct(domain, runnable));
+}
+
+static int compare_cpu_blocked_pct(xenstat_domain *domain1, xenstat_domain 
*domain2)
+{
+       return -compare(get_cpu_pct(domain1, blocked), get_cpu_pct(domain2, 
blocked));
+}
+
+/* Prints cpu percentage statistic */
+static void print_cpu_blocked_pct(xenstat_domain *domain)
+{
+       print("%6.1f", get_cpu_pct(domain, blocked));
+}
+
+static int compare_cpu_offline_pct(xenstat_domain *domain1, xenstat_domain 
*domain2)
+{
+       return -compare(get_cpu_pct(domain1, offline), get_cpu_pct(domain2, 
offline));
+}
+
+/* Prints cpu percentage statistic */
+static void print_cpu_offline_pct(xenstat_domain *domain)
+{
+       print("%6.1f", get_cpu_pct(domain, offline));
 }
 
 /* Compares current memory of two domains, returning -1,0,1 for <,=,> */
@@ -871,6 +942,8 @@ void do_header(void)
        for(i = 0; i < NUM_FIELDS; i++) {
                if (i != 0)
                        print(" ");
+               if (!fields[i].default_on && !show_runnable)
+                       continue;
                /* The BOLD attribute is turned on for the sort column */
                if (i == sort_field)
                        xentop_attron(A_BOLD);
@@ -933,6 +1006,8 @@ void do_domain(xenstat_domain *domain)
        for (i = 0; i < NUM_FIELDS; i++) {
                if (i != 0)
                        print(" ");
+               if (!fields[i].default_on && !show_runnable)
+                       continue;
                if (i == sort_field)
                        xentop_attron(A_BOLD);
                fields[i].print(domain);
@@ -1133,9 +1208,10 @@ int main(int argc, char **argv)
                { "batch",         no_argument,       NULL, 'b' },
                { "iterations",    required_argument, NULL, 'i' },
                { "full-name",     no_argument,       NULL, 'f' },
+               { "runnable",      no_argument,        NULL, 'R' },
                { 0, 0, 0, 0 },
        };
-       const char *sopts = "hVnxrvd:bi:f";
+       const char *sopts = "hVnxrRvd:bi:f";
 
        if (atexit(cleanup) != 0)
                fail("Failed to install cleanup handler.\n");
@@ -1174,6 +1250,9 @@ int main(int argc, char **argv)
                        iterations = atoi(optarg);
                        loop = 0;
                        break;
+               case 'R':
+                       show_runnable = 1;
+                       break;
                case 'f':
                        show_full_name = 1;
                        break;
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 7cf610a..83d8188 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -211,6 +211,69 @@ void getdomaininfo(struct domain *d, struct 
xen_domctl_getdomaininfo *info)
 
     memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
 }
+/*
+ * Fill @info with extended domain info, including per-runstate accumulated
+ * times summed over all vcpus.  Mirrors getdomaininfo() plus the
+ * cpu_time_{blocked,runnable,offline} fields.
+ */
+void getdomaininfo2(struct domain *d, struct xen_domctl_getdomaininfo2 *info)
+{
+    struct vcpu *v;
+    u64 cpu_time = 0;
+    u64 blocked_time = 0;
+    u64 offline_time = 0;
+    u64 runnable_time = 0;
+    int flags = XEN_DOMINF_blocked;
+    struct vcpu_runstate_info runstate;
+
+    info->domain = d->domain_id;
+    info->nr_online_vcpus = 0;
+    info->ssidref = 0;
+    /* Must be initialised: for a vcpu-less domain the loop below never runs
+     * and the struct is copied to the guest, leaking stack bytes otherwise. */
+    info->max_vcpu_id = 0;
+
+    /*
+     * - domain is marked as blocked only if all its vcpus are blocked
+     * - domain is marked as running if any of its vcpus is running
+     */
+    for_each_vcpu ( d, v )
+    {
+        vcpu_runstate_get(v, &runstate);
+        cpu_time += runstate.time[RUNSTATE_running];
+        blocked_time += runstate.time[RUNSTATE_blocked];
+        offline_time += runstate.time[RUNSTATE_offline];
+        runnable_time += runstate.time[RUNSTATE_runnable];
+        info->max_vcpu_id = v->vcpu_id;
+        if ( !test_bit(_VPF_down, &v->pause_flags) )
+        {
+            if ( !(v->pause_flags & VPF_blocked) )
+                flags &= ~XEN_DOMINF_blocked;
+            if ( v->is_running )
+                flags |= XEN_DOMINF_running;
+            info->nr_online_vcpus++;
+        }
+    }
+
+    info->cpu_time = cpu_time;
+    info->cpu_time_offline = offline_time;
+    info->cpu_time_blocked = blocked_time;
+    info->cpu_time_runnable = runnable_time;
+
+    info->flags = (info->nr_online_vcpus ? flags : 0) |
+        ((d->is_dying == DOMDYING_dead) ? XEN_DOMINF_dying    : 0) |
+        (d->is_shut_down                ? XEN_DOMINF_shutdown : 0) |
+        (d->is_paused_by_controller     ? XEN_DOMINF_paused   : 0) |
+        (d->debugger_attached           ? XEN_DOMINF_debugged : 0) |
+        d->shutdown_code << XEN_DOMINF_shutdownshift;
+
+    if ( is_hvm_domain(d) )
+        info->flags |= XEN_DOMINF_hvm_guest;
+
+    /* TODO: xsm_security_domaininfo(d, info); -- ssidref stays 0 until XSM
+     * is wired up.  (C99 // comments are not used in Xen common code.) */
+
+    info->tot_pages         = d->tot_pages;
+    info->max_pages         = d->max_pages;
+    info->shr_pages         = atomic_read(&d->shr_pages);
+    info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT);
+    BUG_ON(SHARED_M2P(info->shared_info_frame));
+
+    info->cpupool = d->cpupool ? d->cpupool->cpupool_id : CPUPOOLID_NONE;
+
+    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
+}
 
 static unsigned int default_vcpu0_location(cpumask_t *online)
 {
diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c
index 329e9d0..944410f 100644
--- a/xen/common/sysctl.c
+++ b/xen/common/sysctl.c
@@ -113,6 +113,49 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) 
u_sysctl)
         op->u.getdomaininfolist.num_domains = num_domains;
     }
     break;
+    case XEN_SYSCTL_getdomaininfolist2:
+    {
+        struct domain *d;
+        struct xen_domctl_getdomaininfo2 info;
+        u32 num_domains = 0;
+
+        rcu_read_lock(&domlist_read_lock);
+
+        for_each_domain ( d )
+        {
+            /* All accesses must go through the getdomaininfolist2 union
+             * member; the original hack read the v1 member for the IN
+             * fields and only worked because the layouts happen to match. */
+            if ( d->domain_id < op->u.getdomaininfolist2.first_domain )
+                continue;
+            if ( num_domains == op->u.getdomaininfolist2.max_domains )
+                break;
+
+            /* HACK!
+            ret = xsm_getdomaininfo(d);
+            if ( ret )
+                continue;
+            */
+            getdomaininfo2(d, &info);
+
+            if ( copy_to_guest_offset(op->u.getdomaininfolist2.buffer,
+                                      num_domains, &info, 1) )
+            {
+                ret = -EFAULT;
+                break;
+            }
+
+            num_domains++;
+        }
+
+        rcu_read_unlock(&domlist_read_lock);
+
+        if ( ret != 0 )
+            break;
+
+        op->u.getdomaininfolist2.num_domains = num_domains;
+
+        if ( copy_to_guest(u_sysctl, op, 1) )
+            ret = -EFAULT;
+    }
+    break;
 
 #ifdef PERF_COUNTERS
     case XEN_SYSCTL_perfc_op:
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index f22fe2e..c3f6089 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -115,6 +115,52 @@ struct xen_domctl_getdomaininfo {
 typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t);
 
+/* Extended domain info, returned by XEN_SYSCTL_getdomaininfolist2.
+ * Adds per-runstate accumulated times to xen_domctl_getdomaininfo.
+ * Reuses the XEN_DOMINF_* flag definitions declared for the v1 struct
+ * above; the original hack redefined all sixteen of them verbatim, which
+ * is pure duplication and invites skew. */
+struct xen_domctl_getdomaininfo2 {
+    /* OUT variables. */
+    domid_t  domain;              /* Also echoed in domctl.domain */
+    uint32_t flags;              /* XEN_DOMINF_* */
+    uint64_aligned_t tot_pages;
+    uint64_aligned_t max_pages;
+    uint64_aligned_t shr_pages;
+    uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */
+    uint64_aligned_t cpu_time;          /* ns in RUNSTATE_running  */
+    uint64_aligned_t cpu_time_blocked;  /* ns in RUNSTATE_blocked  */
+    uint64_aligned_t cpu_time_runnable; /* ns in RUNSTATE_runnable */
+    uint64_aligned_t cpu_time_offline;  /* ns in RUNSTATE_offline  */
+    uint32_t nr_online_vcpus;    /* Number of VCPUs currently online. */
+    uint32_t max_vcpu_id;        /* Maximum VCPUID in use by this domain. */
+    uint32_t ssidref;
+    xen_domain_handle_t handle;
+    uint32_t cpupool;
+};
+typedef struct xen_domctl_getdomaininfo2 xen_domctl_getdomaininfo2_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo2_t);
+
 
 /* XEN_DOMCTL_getmemlist */
 struct xen_domctl_getmemlist {
@@ -975,6 +1021,7 @@ struct xen_domctl {
     union {
         struct xen_domctl_createdomain      createdomain;
         struct xen_domctl_getdomaininfo     getdomaininfo;
+        struct xen_domctl_getdomaininfo2     getdomaininfo2;
         struct xen_domctl_getmemlist        getmemlist;
         struct xen_domctl_getpageframeinfo  getpageframeinfo;
         struct xen_domctl_getpageframeinfo2 getpageframeinfo2;
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index eecb839..ff9be43 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -161,6 +161,17 @@ struct xen_sysctl_getdomaininfolist {
 typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t;
 DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t);
 
+struct xen_sysctl_getdomaininfolist2 {
+    /* IN variables. */
+    domid_t               first_domain;
+    uint32_t              max_domains;
+    XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo2_t) buffer;
+    /* OUT variables. */
+    uint32_t              num_domains;
+};
+typedef struct xen_sysctl_getdomaininfolist2 xen_sysctl_getdomaininfolist2_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist2_t);
+
 /* Inject debug keys into Xen. */
 /* XEN_SYSCTL_debug_keys */
 struct xen_sysctl_debug_keys {
@@ -824,6 +835,7 @@ struct xen_sysctl {
  * it.
  */
 #define XEN_SYSCTL_xsplice_op                    27
+#define XEN_SYSCTL_getdomaininfolist2            28
     uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
     union {
         struct xen_sysctl_readconsole       readconsole;
@@ -846,6 +858,7 @@ struct xen_sysctl {
         struct xen_sysctl_scheduler_op      scheduler_op;
         struct xen_sysctl_coverage_op       coverage_op;
         struct xen_sysctl_xsplice_op        xsplice;
+        struct xen_sysctl_getdomaininfolist2 getdomaininfolist2;
         uint8_t                             pad[128];
     } u;
 };
diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
index a057069..2868363 100644
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -16,7 +16,9 @@ struct vcpu *alloc_dom0_vcpu0(void);
 int vcpu_reset(struct vcpu *);
 
 struct xen_domctl_getdomaininfo;
+struct xen_domctl_getdomaininfo2;
 void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info);
+void getdomaininfo2(struct domain *d, struct xen_domctl_getdomaininfo2 *info);
 
 /*
  * Arch-specifics.
-- 
2.4.3

>From e48a447d505036089c1544cde2ab9648eabf595c Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>
Date: Tue, 8 Apr 2014 11:07:36 -0400
Subject: [PATCH 2/2] xentop: When running with -R don't output everything.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>
---
 tools/xenstat/xentop/xentop.c | 49 ++++++++++++++++++++++++-------------------
 1 file changed, 27 insertions(+), 22 deletions(-)

diff --git a/tools/xenstat/xentop/xentop.c b/tools/xenstat/xentop/xentop.c
index 88433e9..b6a1797 100644
--- a/tools/xenstat/xentop/xentop.c
+++ b/tools/xenstat/xentop/xentop.c
@@ -179,31 +179,32 @@ typedef struct field {
        unsigned int default_on;
        int (*compare)(xenstat_domain *domain1, xenstat_domain *domain2);
        void (*print)(xenstat_domain *domain);
+	unsigned int optional;      /* hidden when -R/--runnable is given */
 } field;
 
 field fields[] = {
-       { FIELD_NAME,      "NAME",      10, 1, compare_name,      print_name    
},
-       { FIELD_STATE,     "STATE",      6, 1, compare_state,     print_state   
},
-       { FIELD_CPU,       "CPU(sec)",  10, 1, compare_cpu,       print_cpu     
},
-       { FIELD_CPU_PCT,   "CPU(%)",     6, 1, compare_cpu_pct,   print_cpu_pct 
},
-       { FIELD_CPU_OFFLINE_PCT,   "CPU_O(%)",     6, 0, 
compare_cpu_offline_pct,   print_cpu_offline_pct },
-       { FIELD_CPU_BLOCKED_PCT,   "CPU_B(%)",     6, 0, 
compare_cpu_blocked_pct,   print_cpu_blocked_pct },
-       { FIELD_CPU_RUNNABLE_PCT,   "CPU_R(%)",     6, 0, 
compare_cpu_runnable_pct,   print_cpu_runnable_pct },
-       { FIELD_MEM,       "MEM(k)",    10, 1, compare_mem,       print_mem     
},
-       { FIELD_MEM_PCT,   "MEM(%)",     6, 1, compare_mem,       print_mem_pct 
},
-       { FIELD_MAXMEM,    "MAXMEM(k)", 10, 1, compare_maxmem,    print_maxmem  
},
-       { FIELD_MAX_PCT,   "MAXMEM(%)",  9, 1, compare_maxmem,    print_max_pct 
},
-       { FIELD_VCPUS,     "VCPUS",      5, 1, compare_vcpus,     print_vcpus   
},
-       { FIELD_NETS,      "NETS",       4, 1, compare_nets,      print_nets    
},
-       { FIELD_NET_TX,    "NETTX(k)",   8, 1, compare_net_tx,    print_net_tx  
},
-       { FIELD_NET_RX,    "NETRX(k)",   8, 1, compare_net_rx,    print_net_rx  
},
-       { FIELD_VBDS,      "VBDS",       4, 1, compare_vbds,      print_vbds    
},
-       { FIELD_VBD_OO,    "VBD_OO",     8, 1, compare_vbd_oo,    print_vbd_oo  
},
-       { FIELD_VBD_RD,    "VBD_RD",     8, 1, compare_vbd_rd,    print_vbd_rd  
},
-       { FIELD_VBD_WR,    "VBD_WR",     8, 1, compare_vbd_wr,    print_vbd_wr  
},
-       { FIELD_VBD_RSECT, "VBD_RSECT", 10, 1, compare_vbd_rsect, 
print_vbd_rsect  },
-       { FIELD_VBD_WSECT, "VBD_WSECT", 10, 1, compare_vbd_wsect, 
print_vbd_wsect  },
-       { FIELD_SSID,      "SSID",       4, 1, compare_ssid,      print_ssid    
}
+       { FIELD_NAME,      "NAME",      10, 1, compare_name,      print_name    
, 0},
+       { FIELD_STATE,     "STATE",      6, 1, compare_state,     print_state   
, 0},
+       { FIELD_CPU,       "CPU(sec)",  10, 1, compare_cpu,       print_cpu     
, 0},
+       { FIELD_CPU_PCT,   "CPU(%)",     6, 1, compare_cpu_pct,   print_cpu_pct 
, 0},
+       { FIELD_CPU_OFFLINE_PCT,   "CPU_O(%)",     6, 0, 
compare_cpu_offline_pct,   print_cpu_offline_pct , 0},
+       { FIELD_CPU_BLOCKED_PCT,   "CPU_B(%)",     6, 0, 
compare_cpu_blocked_pct,   print_cpu_blocked_pct , 0},
+       { FIELD_CPU_RUNNABLE_PCT,   "CPU_R(%)",     6, 0, 
compare_cpu_runnable_pct,   print_cpu_runnable_pct , 0},
+       { FIELD_MEM,       "MEM(k)",    10, 1, compare_mem,       print_mem     
, 1},
+       { FIELD_MEM_PCT,   "MEM(%)",     6, 1, compare_mem,       print_mem_pct 
, 1},
+       { FIELD_MAXMEM,    "MAXMEM(k)", 10, 1, compare_maxmem,    print_maxmem  
, 1},
+       { FIELD_MAX_PCT,   "MAXMEM(%)",  9, 1, compare_maxmem,    print_max_pct 
, 1},
+       { FIELD_VCPUS,     "VCPUS",      5, 1, compare_vcpus,     print_vcpus   
, 0},
+       { FIELD_NETS,      "NETS",       4, 1, compare_nets,      print_nets    
, 1},
+       { FIELD_NET_TX,    "NETTX(k)",   8, 1, compare_net_tx,    print_net_tx  
, 1},
+       { FIELD_NET_RX,    "NETRX(k)",   8, 1, compare_net_rx,    print_net_rx  
, 1},
+       { FIELD_VBDS,      "VBDS",       4, 1, compare_vbds,      print_vbds    
, 1},
+       { FIELD_VBD_OO,    "VBD_OO",     8, 1, compare_vbd_oo,    print_vbd_oo  
, 1},
+       { FIELD_VBD_RD,    "VBD_RD",     8, 1, compare_vbd_rd,    print_vbd_rd  
, 1},
+       { FIELD_VBD_WR,    "VBD_WR",     8, 1, compare_vbd_wr,    print_vbd_wr  
, 1},
+       { FIELD_VBD_RSECT, "VBD_RSECT", 10, 1, compare_vbd_rsect, 
print_vbd_rsect  , 1},
+       { FIELD_VBD_WSECT, "VBD_WSECT", 10, 1, compare_vbd_wsect, 
print_vbd_wsect  , 1},
+       { FIELD_SSID,      "SSID",       4, 1, compare_ssid,      print_ssid    
, 1}
 };
 
 const unsigned int NUM_FIELDS = sizeof(fields)/sizeof(field);
@@ -944,6 +945,8 @@ void do_header(void)
                        print(" ");
                if (!fields[i].default_on && !show_runnable)
                        continue;
+               if (fields[i].optional && show_runnable)
+                       continue;
                /* The BOLD attribute is turned on for the sort column */
                if (i == sort_field)
                        xentop_attron(A_BOLD);
@@ -1008,6 +1011,8 @@ void do_domain(xenstat_domain *domain)
                        print(" ");
                if (!fields[i].default_on && !show_runnable)
                        continue;
+               if (fields[i].optional && show_runnable)
+                       continue;
                if (i == sort_field)
                        xentop_attron(A_BOLD);
                fields[i].print(domain);
-- 
2.4.3

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

Reply via email to