This patch updates the output from /proc/ppc64/lparcfg to display the
processor virtualization resource allocations for a shared processor
partition.

This information is already gathered via the h_get_ppp call, we just
have to make sure that the ibm,partition-performance-parameters-level
property is >= 1 to ensure that the information is valid.

Signed-off-by: Nathan Fontenot <nf...@austin.ibm.com>
---

Index: linux-2.6/arch/powerpc/kernel/lparcfg.c
===================================================================
--- linux-2.6.orig/arch/powerpc/kernel/lparcfg.c        2009-05-21 10:24:57.000000000 -0500
+++ linux-2.6/arch/powerpc/kernel/lparcfg.c     2009-05-26 11:06:59.000000000 -0500
@@ -169,6 +169,9 @@
        u8      unallocated_weight;
        u16     active_procs_in_pool;
        u16     active_system_procs;
+       u16     phys_platform_procs;
+       u32     max_proc_cap_avail;
+       u32     entitled_proc_cap_avail;
};

/*
@@ -190,13 +193,18 @@
 *            XX - Unallocated Variable Processor Capacity Weight.
 *              XXXX - Active processors in Physical Processor Pool.
 *                  XXXX  - Processors active on platform.
+ *  R8 (QQQQRRRRRRSSSSSS). if ibm,partition-performance-parameters-level >= 1
+ *     XXXX - Physical platform procs allocated to virtualization.
+ *         XXXXXX - Max procs capacity % available to the partitions pool.
+ *               XXXXXX - Entitled procs capacity % available to the
+ *                        partitions pool.
 */
static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
{
        unsigned long rc;
-       unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+       unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

-       rc = plpar_hcall(H_GET_PPP, retbuf);
+       rc = plpar_hcall9(H_GET_PPP, retbuf);

        ppp_data->entitlement = retbuf[0];
        ppp_data->unallocated_entitlement = retbuf[1];
@@ -210,6 +218,10 @@
        ppp_data->active_procs_in_pool = (retbuf[3] >> 2 * 8) & 0xffff;
        ppp_data->active_system_procs = retbuf[3] & 0xffff;

+       ppp_data->phys_platform_procs = retbuf[4] >> 6 * 8;
+       ppp_data->max_proc_cap_avail = (retbuf[4] >> 3 * 8) & 0xffffff;
+       ppp_data->entitled_proc_cap_avail = retbuf[4] & 0xffffff;
+
        return rc;
}

@@ -234,6 +246,8 @@
static void parse_ppp_data(struct seq_file *m)
{
        struct hvcall_ppp_data ppp_data;
+       struct device_node *root;
+       const int *perf_level;
        int rc;

        rc = h_get_ppp(&ppp_data);
@@ -267,6 +281,28 @@
        seq_printf(m, "capped=%d\n", ppp_data.capped);
        seq_printf(m, "unallocated_capacity=%lld\n",
                   ppp_data.unallocated_entitlement);
+
+       /* The last bits of information returned from h_get_ppp are only
+        * valid if the ibm,partition-performance-parameters-level
+        * property is >= 1 (of_get_property returns NULL if absent).
+        */
+       root = of_find_node_by_path("/");
+       if (root) {
+               perf_level = of_get_property(root,
+                               "ibm,partition-performance-parameters-level",
+                                            NULL);
+               if (perf_level && *perf_level >= 1) {
+                       seq_printf(m,
+                           "physical_procs_allocated_to_virtualization=%d\n",
+                                  ppp_data.phys_platform_procs);
+                       seq_printf(m, "max_proc_capacity_available=%d\n",
+                                  ppp_data.max_proc_cap_avail);
+                       seq_printf(m, "entitled_proc_capacity_available=%d\n",
+                                  ppp_data.entitled_proc_cap_avail);
+               }
+
+               of_node_put(root);
+       }
}

/**
_______________________________________________
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev

Reply via email to