Commit 540a637c3410780b519fc055f432afe271f642f8 defines a new
helper mark_page_free() to extract common code, but it broke the
handling of the local variable "tainted", as revealed by Coverity ID 1491872.

This patch lets mark_page_free() return the boolean value of the variable
"tainted" and renames the local variable "tainted" to "pg_tainted"
to distinguish it from the global variable of the same name.

Signed-off-by: Penny Zheng <penny.zh...@arm.com>
---
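Below is a minimal, self-contained sketch of the pattern this change adopts
(it is not part of the patch and not the actual Xen code; all names are
illustrative): the helper reports the per-page condition through its return
value, and the caller accumulates it across its loop instead of relying on a
variable that is no longer in the helper's scope.

/* Standalone illustration only; compile with any C99 compiler. */
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
    bool offlining;   /* stand-in for the PGC_state_offlining condition */
};

/* Returns true when the page was found in the "offlining" state. */
static bool sketch_mark_page_free(struct fake_page *pg)
{
    bool tainted = false;

    if ( pg->offlining )
    {
        pg->offlining = false;  /* pretend we moved it to "offlined" */
        tainted = true;
    }

    return tainted;
}

int main(void)
{
    struct fake_page pages[4] = { { false }, { true }, { false }, { false } };
    bool any_tainted = false;
    unsigned int i;

    /* The caller ORs the per-page results, as free_heap_pages() does. */
    for ( i = 0; i < 4; i++ )
        if ( sketch_mark_page_free(&pages[i]) )
            any_tainted = true;

    printf("any page tainted: %s\n", any_tainted ? "yes" : "no");
    return 0;
}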
 xen/common/page_alloc.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index b9441cb06f..c6f48f7a97 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1380,8 +1380,10 @@ bool scrub_free_pages(void)
     return node_to_scrub(false) != NUMA_NO_NODE;
 }
 
-static void mark_page_free(struct page_info *pg, mfn_t mfn)
+static bool mark_page_free(struct page_info *pg, mfn_t mfn)
 {
+    unsigned int pg_tainted = 0;
+
     ASSERT(mfn_x(mfn) == mfn_x(page_to_mfn(pg)));
 
     /*
@@ -1405,7 +1407,7 @@ static void mark_page_free(struct page_info *pg, mfn_t mfn)
     case PGC_state_offlining:
         pg->count_info = (pg->count_info & PGC_broken) |
                          PGC_state_offlined;
-        tainted = 1;
+        pg_tainted = 1;
         break;
 
     default:
@@ -1425,6 +1427,8 @@ static void mark_page_free(struct page_info *pg, mfn_t mfn)
     /* This page is not a guest frame any more. */
     page_set_owner(pg, NULL); /* set_gpfn_from_mfn snoops pg owner */
     set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
+
+    return pg_tainted;
 }
 
 /* Free 2^@order set of pages. */
@@ -1433,7 +1437,7 @@ static void free_heap_pages(
 {
     unsigned long mask;
     mfn_t mfn = page_to_mfn(pg);
-    unsigned int i, node = phys_to_nid(mfn_to_maddr(mfn)), tainted = 0;
+    unsigned int i, node = phys_to_nid(mfn_to_maddr(mfn)), pg_tainted = 0;
     unsigned int zone = page_to_zone(pg);
 
     ASSERT(order <= MAX_ORDER);
@@ -1443,7 +1447,8 @@ static void free_heap_pages(
 
     for ( i = 0; i < (1 << order); i++ )
     {
-        mark_page_free(&pg[i], mfn_add(mfn, i));
+        if ( mark_page_free(&pg[i], mfn_add(mfn, i)) )
+            pg_tainted = 1;
 
         if ( need_scrub )
         {
@@ -1517,7 +1522,7 @@ static void free_heap_pages(
 
     page_list_add_scrub(pg, node, zone, order, pg->u.free.first_dirty);
 
-    if ( tainted )
+    if ( pg_tainted )
         reserve_offlined_page(pg);
 
     spin_unlock(&heap_lock);
-- 
2.25.1

