This is an automated email from the ASF dual-hosted git repository.

stigahuang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit cfd79b40beab86f08ad72e0bea41eabf736d0a99
Author: stiga-huang <[email protected]>
AuthorDate: Tue Sep 13 16:23:40 2022 +0800

    IMPALA-11580: Fix memory leak in legacy catalog mode when applying incremental partition updates
    
    In the legacy catalog mode, catalogd propagates incremental metadata
    updates at the partition level. While applying the updates, impalad
    reuses the existing partition objects and moves them to a new HdfsTable
    object. However, the partition objects are immutable, which means their
    reference to the old table object remains unchanged. JVM GC cannot
    collect the stale table objects since they still have active references
    from the partitions, which results in a memory leak.
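    
    A minimal sketch of the leaked reference chain, using simplified
    stand-in classes rather than the real HdfsTable/HdfsPartition:
    
        import java.util.ArrayList;
        import java.util.List;
    
        class Table {
          final List<Partition> partitions = new ArrayList<>();
        }
    
        class Partition {
          // Immutable back-reference to the owning table: set once, never updated.
          final Table table;
          Partition(Table table) { this.table = table; }
        }
    
        class LeakDemo {
          public static void main(String[] args) {
            Table oldTable = new Table();
            oldTable.partitions.add(new Partition(oldTable));
    
            // Reuse the existing partition objects under a new table, as the
            // pre-fix incremental update path did:
            Table newTable = new Table();
            newTable.partitions.addAll(oldTable.partitions);
    
            // Each reused partition still holds oldTable through its final
            // field, so oldTable stays reachable and is never collected.
          }
        }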
    
    This patch fixes the issue by recreating a new partition object based on
    the existing partition object with the new table field.
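    
    Sketched against the same stand-in classes, the fix amounts to
    rebuilding each partition so that its back-reference points at the new
    table (the real patch does this through HdfsPartition.Builder, as shown
    in the diff below):
    
        class FixDemo {
          // Recreate the partition under the new table. In the real code all
          // other partition fields are copied over as well; the stand-in
          // Partition has none, so only the table reference changes here.
          static Partition copyUnderNewTable(Partition old, Table newTable) {
            return new Partition(newTable);
          }
    
          public static void main(String[] args) {
            Table oldTable = new Table();
            oldTable.partitions.add(new Partition(oldTable));
    
            Table newTable = new Table();
            for (Partition part : oldTable.partitions) {
              newTable.partitions.add(copyUnderNewTable(part, newTable));
            }
            // Nothing reachable from newTable refers back to oldTable any
            // more, so the stale table object can now be garbage collected.
          }
        }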
    
    Tests:
     - Verified locally that after applying the patch, the number of live
       HdfsTable objects no longer keeps growing.
    
    Change-Id: Ie04ff243c6b82c1a06c489da74353f2d8afe423a
    Reviewed-on: http://gerrit.cloudera.org:8080/18978
    Tested-by: Impala Public Jenkins <[email protected]>
    Reviewed-by: Csaba Ringhofer <[email protected]>
---
 fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java  | 5 +++++
 fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java | 8 ++++++--
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
index 6acd9b9d8..88b7eec26 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
@@ -1278,6 +1278,10 @@ public class HdfsPartition extends CatalogObjectImpl
       this(partition.table_);
       oldInstance_ = partition;
       prevId_ = oldInstance_.id_;
+      copyFromPartition(partition);
+    }
+
+    public Builder copyFromPartition(HdfsPartition partition) {
       partitionKeyValues_ = partition.partitionKeyValues_;
       fileFormatDescriptor_ = partition.fileFormatDescriptor_;
       setFileDescriptors(partition);
@@ -1296,6 +1300,7 @@ public class HdfsPartition extends CatalogObjectImpl
       // Take over the in-flight events
       inFlightEvents_ = partition.inFlightEvents_;
       lastCompactionId_ = partition.lastCompactionId_;
+      return this;
     }
 
     public HdfsPartition build() {
diff --git a/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java b/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java
index a03393475..bed23effa 100644
--- a/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java
@@ -494,8 +494,12 @@ public class ImpaladCatalog extends Catalog implements FeCatalog {
         for (PrunablePartition part : ((HdfsTable) existingTable).getPartitions()) {
           numExistingParts++;
           if (tHdfsTable.partitions.containsKey(part.getId())) {
-            Preconditions.checkState(
-                newHdfsTable.addPartitionNoThrow((HdfsPartition) part));
+            // Create a new partition instance under the new table object and copy all
+            // the fields of the existing partition.
+            HdfsPartition newPart = new HdfsPartition.Builder(newHdfsTable, part.getId())
+                .copyFromPartition((HdfsPartition) part)
+                .build();
+            Preconditions.checkState(newHdfsTable.addPartitionNoThrow(newPart));
           } else {
             numDeletedParts++;
           }
