This is an automated email from the ASF dual-hosted git repository.

lzljs3620320 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/paimon.git


The following commit(s) were added to refs/heads/master by this push:
     new 23985653a1 [core] Fix DataEvolutionCompactTask potential resource leak (#7634)
23985653a1 is described below

commit 23985653a1ee0e5716aaf48e016ce3e029249a07
Author: yuzelin <[email protected]>
AuthorDate: Mon Apr 13 16:56:02 2026 +0800

    [core] Fix DataEvolutionCompactTask potential resource leak (#7634)
---
 .../append/dataevolution/DataEvolutionCompactTask.java       | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/paimon-core/src/main/java/org/apache/paimon/append/dataevolution/DataEvolutionCompactTask.java b/paimon-core/src/main/java/org/apache/paimon/append/dataevolution/DataEvolutionCompactTask.java
index 334780ef8e..c16a4ab801 100644
--- a/paimon-core/src/main/java/org/apache/paimon/append/dataevolution/DataEvolutionCompactTask.java
+++ b/paimon-core/src/main/java/org/apache/paimon/append/dataevolution/DataEvolutionCompactTask.java
@@ -37,6 +37,9 @@ import org.apache.paimon.utils.FileStorePathFactory;
 import org.apache.paimon.utils.RecordWriter;
 import org.apache.paimon.utils.SetUtils;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
@@ -52,6 +55,8 @@ import static org.apache.paimon.utils.Preconditions.checkArgument;
 /** Data evolution table compaction task. */
 public class DataEvolutionCompactTask extends AppendCompactTask {
 
+    private static final Logger LOG = LoggerFactory.getLogger(DataEvolutionCompactTask.class);
+
     private static final Map<String, String> DYNAMIC_WRITE_OPTIONS =
             Collections.singletonMap(CoreOptions.TARGET_FILE_SIZE.key(), "99999 G");
 
@@ -122,6 +127,13 @@ public class DataEvolutionCompactTask extends AppendCompactTask {
         checkArgument(
                 writeResult.size() == 1, "Data evolution compaction should produce one file.");
 
+        try {
+            writer.close();
+            storeWrite.close();
+        } catch (Exception e) {
+            LOG.warn("Failed to close reader and writer.", e);
+        }
+
         DataFileMeta dataFileMeta = writeResult.get(0).assignFirstRowId(firstRowId);
         long minSequenceId =
                 compactBefore.stream()

Reply via email to