available_cores - Number of cores the LPAR (VM) can use at this moment.
Remaining cores will have their CPUs marked as paravirt.

This follows a stepwise approach for reducing/increasing the number of
available_cores.

Very simple Logic.
        if (steal_time > high_threshold)
                available_cores--
        if (steal_time < low_threshold)
                available_cores++

It also checks the previous direction taken to avoid unnecessary ping-pongs.

Note: It works well only when CPUs are spread out in equal numbers across
NUMA nodes.

Originally-by: Srikar Dronamraju <[email protected]>
Signed-off-by: Shrikanth Hegde <[email protected]>
---
 arch/powerpc/platforms/pseries/lpar.c    | 53 ++++++++++++++++++++++++
 arch/powerpc/platforms/pseries/pseries.h |  1 +
 2 files changed, 54 insertions(+)

diff --git a/arch/powerpc/platforms/pseries/lpar.c 
b/arch/powerpc/platforms/pseries/lpar.c
index 935fced6e127..825b5b4e2b43 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -43,6 +43,7 @@
 #include <asm/fadump.h>
 #include <asm/dtl.h>
 #include <asm/vphn.h>
+#include <linux/sched/isolation.h>
 
 #include "pseries.h"
 
@@ -2056,6 +2057,71 @@ void pseries_init_ec_vp_cores(void)
        /* Initialize the available cores to all VP initially */
        available_cores = max(entitled_cores, virtual_procs);
 }
+
+#define STEAL_RATIO_HIGH 400
+#define STEAL_RATIO_LOW  150
+
+/**
+ * update_soft_entitlement() - Step available_cores based on observed steal.
+ * @steal_ratio: Steal time ratio sampled over the last interval.
+ *
+ * Decrement available_cores while steal_ratio stays at or above
+ * STEAL_RATIO_HIGH; increment it while steal_ratio stays at or below
+ * STEAL_RATIO_LOW.  A step is taken only when the previous sample pointed
+ * in the same direction (tracked in prev_direction), which avoids
+ * ping-ponging on a one-off spike.  CPUs of a surrendered core are marked
+ * paravirt and have their tick re-enabled if they are nohz_full.
+ */
+void update_soft_entitlement(unsigned long steal_ratio)
+{
+       static int prev_direction;
+       int cpu;
+
+       if (!entitled_cores)
+               return;
+
+       if (steal_ratio >= STEAL_RATIO_HIGH && prev_direction > 0) {
+               /*
+                * Steal time was high in the previous interval too.
+                * Reduce entitlement further, but never below the
+                * entitled number of cores.
+                */
+               if (available_cores == entitled_cores)
+                       return;
+
+               /* Mark them paravirt, enable tick if it is nohz_full */
+               for (cpu = (available_cores - 1) * threads_per_core;
+                    cpu < available_cores * threads_per_core; cpu++) {
+                       set_cpu_paravirt(cpu, true);
+                       if (tick_nohz_full_cpu(cpu))
+                               tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
+               }
+               available_cores--;
+
+       } else if (steal_ratio <= STEAL_RATIO_LOW && prev_direction < 0) {
+               /*
+                * Steal time was low in the previous interval too.
+                * Increase entitlement further, but never beyond the
+                * number of virtual processors.
+                */
+               if (available_cores == virtual_procs)
+                       return;
+
+               /* Mark the CPUs of one more core available */
+               for (cpu = available_cores * threads_per_core;
+                    cpu < (available_cores + 1) * threads_per_core; cpu++)
+                       set_cpu_paravirt(cpu, false);
+
+               available_cores++;
+       }
+       if (steal_ratio >= STEAL_RATIO_HIGH)
+               prev_direction = 1;
+       else if (steal_ratio <= STEAL_RATIO_LOW)
+               prev_direction = -1;
+       else
+               prev_direction = 0;
+}
 #else
 void pseries_init_ec_vp_cores(void) { return; }
+void update_soft_entitlement(unsigned long steal_ratio) { return; }
 #endif
diff --git a/arch/powerpc/platforms/pseries/pseries.h 
b/arch/powerpc/platforms/pseries/pseries.h
index 3968a6970fa8..d1f9ec77ff57 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -115,6 +115,7 @@ int dlpar_workqueue_init(void);
 
 extern u32 pseries_security_flavor;
 void pseries_setup_security_mitigations(void);
+void update_soft_entitlement(unsigned long steal_ratio);
 
 #ifdef CONFIG_PPC_64S_HASH_MMU
 void pseries_lpar_read_hblkrm_characteristics(void);
-- 
2.47.3


Reply via email to