Previously, hmat_register_target_initiators() calculated the performance
attributes and created the corresponding sysfs links and files at the
same time, and it is only called during memory onlining.

But now, to calculate the abstract distance of a memory target before
memory onlining, we need to calculate its performance attributes without
creating the sysfs links and files.

To make that possible, refactor hmat_register_target_initiators() so
that the performance attributes can be calculated separately from the
sysfs registration.
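
For reference, a follow-up user of the new helper could look roughly like
the hypothetical sketch below; the function name, the locking, and the
call site are assumptions for illustration only, not part of this patch:

	/*
	 * Hypothetical sketch: fill in a target's access 1 performance
	 * attributes without creating any sysfs links or files.
	 */
	static void example_update_attrs(struct memory_target *target)
	{
		static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);

		mutex_lock(&target_lock);	/* assumed serialization */
		hmat_update_target_attrs(target, p_nodes, 1);
		mutex_unlock(&target_lock);
		/* values are stored via hmat_update_target_access() */
	}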

Signed-off-by: "Huang, Ying" <ying.hu...@intel.com>
Cc: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
Cc: Wei Xu <weix...@google.com>
Cc: Alistair Popple <apop...@nvidia.com>
Cc: Dan Williams <dan.j.willi...@intel.com>
Cc: Dave Hansen <dave.han...@intel.com>
Cc: Davidlohr Bueso <d...@stgolabs.net>
Cc: Johannes Weiner <han...@cmpxchg.org>
Cc: Jonathan Cameron <jonathan.came...@huawei.com>
Cc: Michal Hocko <mho...@kernel.org>
Cc: Yang Shi <shy828...@gmail.com>
Cc: Rafael J Wysocki <rafael.j.wyso...@intel.com>
---
 drivers/acpi/numa/hmat.c | 81 +++++++++++++++-------------------------
 1 file changed, 30 insertions(+), 51 deletions(-)

diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index bba268ecd802..2dee0098f1a9 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -582,28 +582,25 @@ static int initiators_to_nodemask(unsigned long *p_nodes)
        return 0;
 }
 
-static void hmat_register_target_initiators(struct memory_target *target)
+static void hmat_update_target_attrs(struct memory_target *target,
+                                    unsigned long *p_nodes, int access)
 {
-       static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
        struct memory_initiator *initiator;
-       unsigned int mem_nid, cpu_nid;
+       unsigned int cpu_nid;
        struct memory_locality *loc = NULL;
        u32 best = 0;
-       bool access0done = false;
        int i;
 
-       mem_nid = pxm_to_node(target->memory_pxm);
+       bitmap_zero(p_nodes, MAX_NUMNODES);
        /*
-        * If the Address Range Structure provides a local processor pxm, link
+        * If the Address Range Structure provides a local processor pxm, set
         * only that one. Otherwise, find the best performance attributes and
-        * register all initiators that match.
+        * collect all initiators that match.
         */
        if (target->processor_pxm != PXM_INVAL) {
                cpu_nid = pxm_to_node(target->processor_pxm);
-               register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
-               access0done = true;
-               if (node_state(cpu_nid, N_CPU)) {
-                       register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
+               if (access == 0 || node_state(cpu_nid, N_CPU)) {
+                       set_bit(target->processor_pxm, p_nodes);
                        return;
                }
        }
@@ -617,47 +614,10 @@ static void hmat_register_target_initiators(struct memory_target *target)
         * We'll also use the sorting to prime the candidate nodes with known
         * initiators.
         */
-       bitmap_zero(p_nodes, MAX_NUMNODES);
        list_sort(NULL, &initiators, initiator_cmp);
        if (initiators_to_nodemask(p_nodes) < 0)
                return;
 
-       if (!access0done) {
-               for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
-                       loc = localities_types[i];
-                       if (!loc)
-                               continue;
-
-                       best = 0;
-                       list_for_each_entry(initiator, &initiators, node) {
-                               u32 value;
-
-                               if (!test_bit(initiator->processor_pxm, p_nodes))
-                                       continue;
-
-                               value = hmat_initiator_perf(target, initiator,
-                                                           loc->hmat_loc);
-                       if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
-                                       bitmap_clear(p_nodes, 0, initiator->processor_pxm);
-                               if (value != best)
-                                       clear_bit(initiator->processor_pxm, p_nodes);
-                       }
-                       if (best)
-                               hmat_update_target_access(target, loc->hmat_loc->data_type,
-                                                         best, 0);
-               }
-
-               for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
-                       cpu_nid = pxm_to_node(i);
-                       register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
-               }
-       }
-
-       /* Access 1 ignores Generic Initiators */
-       bitmap_zero(p_nodes, MAX_NUMNODES);
-       if (initiators_to_nodemask(p_nodes) < 0)
-               return;
-
        for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
                loc = localities_types[i];
                if (!loc)
@@ -667,7 +627,7 @@ static void hmat_register_target_initiators(struct memory_target *target)
                list_for_each_entry(initiator, &initiators, node) {
                        u32 value;
 
-                       if (!initiator->has_cpu) {
+                       if (access == 1 && !initiator->has_cpu) {
                                clear_bit(initiator->processor_pxm, p_nodes);
                                continue;
                        }
@@ -681,14 +641,33 @@ static void hmat_register_target_initiators(struct memory_target *target)
                                clear_bit(initiator->processor_pxm, p_nodes);
                }
                if (best)
-                       hmat_update_target_access(target, loc->hmat_loc->data_type, best, 1);
+                       hmat_update_target_access(target, loc->hmat_loc->data_type, best, access);
        }
+}
+
+static void __hmat_register_target_initiators(struct memory_target *target,
+                                             unsigned long *p_nodes,
+                                             int access)
+{
+       unsigned int mem_nid, cpu_nid;
+       int i;
+
+       mem_nid = pxm_to_node(target->memory_pxm);
+       hmat_update_target_attrs(target, p_nodes, access);
        for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
                cpu_nid = pxm_to_node(i);
-               register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
+               register_memory_node_under_compute_node(mem_nid, cpu_nid, access);
        }
 }
 
+static void hmat_register_target_initiators(struct memory_target *target)
+{
+       static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
+
+       __hmat_register_target_initiators(target, p_nodes, 0);
+       __hmat_register_target_initiators(target, p_nodes, 1);
+}
+
 static void hmat_register_target_cache(struct memory_target *target)
 {
        unsigned mem_nid = pxm_to_node(target->memory_pxm);
-- 
2.39.2