Update 'book3s_hv_pmu.c' to add five new perf-events mapped to the five
Hostwide counters. Since these newly introduced perf events are at system
wide scope and can be read from any L1-LPAR CPU, 'kvmppc_pmu's scope and
capabilities are updated appropriately.

Also introduce two new helpers. First is kvmppc_update_l0_stats() that uses
the infrastructure introduced in previous patches to issue the
H_GUEST_GET_STATE hcall to L0-PowerVM to fetch the guest-state-buffer holding
the latest values of these counters, which is then parsed and the 'l0_stats'
variable updated.

Second helper is kvmppc_pmu_event_update() which is called from
'kvmppc_pmu' callbacks and uses kvmppc_update_l0_stats() to update
'l0_stats' and then update the 'struct perf_event's event-counter.

Some minor updates to kvmppc_pmu_{add, del, read}() to remove some debug
scaffolding code.

Signed-off-by: Vaibhav Jain <vaib...@linux.ibm.com>
---
 arch/powerpc/kvm/book3s_hv_pmu.c | 92 +++++++++++++++++++++++++++++++-
 1 file changed, 91 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kvm/book3s_hv_pmu.c b/arch/powerpc/kvm/book3s_hv_pmu.c
index f7fd5190ecf7..99bd1ae2dddf 100644
--- a/arch/powerpc/kvm/book3s_hv_pmu.c
+++ b/arch/powerpc/kvm/book3s_hv_pmu.c
@@ -30,6 +30,11 @@
 #include "asm/guest-state-buffer.h"
 
 enum kvmppc_pmu_eventid {
+       KVMPPC_EVENT_HOST_HEAP,
+       KVMPPC_EVENT_HOST_HEAP_MAX,
+       KVMPPC_EVENT_HOST_PGTABLE,
+       KVMPPC_EVENT_HOST_PGTABLE_MAX,
+       KVMPPC_EVENT_HOST_PGTABLE_RECLAIM,
        KVMPPC_EVENT_MAX,
 };
 
@@ -51,8 +56,14 @@ static DEFINE_SPINLOCK(lock_l0_stats);
 /* GSB related structs needed to talk to L0 */
 static struct kvmppc_gs_msg *gsm_l0_stats;
 static struct kvmppc_gs_buff *gsb_l0_stats;
+static struct kvmppc_gs_parser gsp_l0_stats;
 
 static struct attribute *kvmppc_pmu_events_attr[] = {
+       KVMPPC_PMU_EVENT_ATTR(host_heap, KVMPPC_EVENT_HOST_HEAP),
+       KVMPPC_PMU_EVENT_ATTR(host_heap_max, KVMPPC_EVENT_HOST_HEAP_MAX),
+       KVMPPC_PMU_EVENT_ATTR(host_pagetable, KVMPPC_EVENT_HOST_PGTABLE),
+       KVMPPC_PMU_EVENT_ATTR(host_pagetable_max, KVMPPC_EVENT_HOST_PGTABLE_MAX),
+       KVMPPC_PMU_EVENT_ATTR(host_pagetable_reclaim, KVMPPC_EVENT_HOST_PGTABLE_RECLAIM),
        NULL,
 };
 
@@ -61,7 +72,7 @@ static const struct attribute_group kvmppc_pmu_events_group = {
        .attrs = kvmppc_pmu_events_attr,
 };
 
-PMU_FORMAT_ATTR(event, "config:0");
+PMU_FORMAT_ATTR(event, "config:0-5");
 static struct attribute *kvmppc_pmu_format_attr[] = {
        &format_attr_event.attr,
        NULL,
@@ -78,6 +89,79 @@ static const struct attribute_group *kvmppc_pmu_attr_groups[] = {
        NULL,
 };
 
+/*
+ * Issue the hcall to get the L0-host stats.
+ * Should be called with l0-stat lock held
+ */
+static int kvmppc_update_l0_stats(void)
+{
+       int rc;
+
+       /* With HOST_WIDE flags guestid and vcpuid will be ignored */
+       rc = kvmppc_gsb_recv(gsb_l0_stats, KVMPPC_GS_FLAGS_HOST_WIDE);
+       if (rc)
+               goto out;
+
+       /* Parse the guest state buffer if the hcall was successful */
+       rc = kvmppc_gse_parse(&gsp_l0_stats, gsb_l0_stats);
+       if (rc)
+               goto out;
+
+       /* Update the stats returned by L0 */
+       memset(&l0_stats, 0, sizeof(l0_stats));
+       rc = kvmppc_gsm_refresh_info(gsm_l0_stats, gsb_l0_stats);
+
+out:
+       return rc;
+}
+
+/* Update the value of the given perf_event */
+static int kvmppc_pmu_event_update(struct perf_event *event)
+{
+       int rc;
+       u64 curr_val, prev_val;
+       unsigned long flags;
+       unsigned int config = event->attr.config;
+
+       /* Ensure no one else is modifying the l0_stats */
+       spin_lock_irqsave(&lock_l0_stats, flags);
+
+       rc = kvmppc_update_l0_stats();
+       if (!rc) {
+               switch (config) {
+               case KVMPPC_EVENT_HOST_HEAP:
+                       curr_val = l0_stats.guest_heap;
+                       break;
+               case KVMPPC_EVENT_HOST_HEAP_MAX:
+                       curr_val = l0_stats.guest_heap_max;
+                       break;
+               case KVMPPC_EVENT_HOST_PGTABLE:
+                       curr_val = l0_stats.guest_pgtable_size;
+                       break;
+               case KVMPPC_EVENT_HOST_PGTABLE_MAX:
+                       curr_val = l0_stats.guest_pgtable_size_max;
+                       break;
+               case KVMPPC_EVENT_HOST_PGTABLE_RECLAIM:
+                       curr_val = l0_stats.guest_pgtable_reclaim;
+                       break;
+               default:
+                       rc = -ENOENT;
+                       break;
+               }
+       }
+
+       spin_unlock_irqrestore(&lock_l0_stats, flags);
+
+       /* If no error then update the perf event */
+       if (!rc) {
+               prev_val = local64_xchg(&event->hw.prev_count, curr_val);
+               if (curr_val > prev_val)
+                       local64_add(curr_val - prev_val, &event->count);
+       }
+
+       return rc;
+}
+
 static int kvmppc_pmu_event_init(struct perf_event *event)
 {
        unsigned int config = event->attr.config;
@@ -100,15 +184,19 @@ static int kvmppc_pmu_event_init(struct perf_event *event)
 
 static void kvmppc_pmu_del(struct perf_event *event, int flags)
 {
+       /* Do nothing */
 }
 
 static int kvmppc_pmu_add(struct perf_event *event, int flags)
 {
+       if (flags & PERF_EF_START)
+               return kvmppc_pmu_event_update(event);
        return 0;
 }
 
 static void kvmppc_pmu_read(struct perf_event *event)
 {
+       kvmppc_pmu_event_update(event);
 }
 
 /* Return the size of the needed guest state buffer */
@@ -282,6 +370,8 @@ static struct pmu kvmppc_pmu = {
        .read = kvmppc_pmu_read,
        .attr_groups = kvmppc_pmu_attr_groups,
        .type = -1,
+       .scope = PERF_PMU_SCOPE_SYS_WIDE,
+       .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
 };
 
 int kvmppc_register_pmu(void)
-- 
2.47.1


Reply via email to