luoyuxia commented on a change in pull request #16700:
URL: https://github.com/apache/flink/pull/16700#discussion_r684033345



##########
File path: flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveParserSemanticAnalyzer.java
##########
@@ -633,80 +623,59 @@ private HiveParserASTNode genValuesTempTable(HiveParserASTNode originalFrom, Hiv
 
         // The second child of the TOK_VIRTUAL_TABLE should be TOK_VALUES_TABLE
         HiveParserASTNode valuesTable = (HiveParserASTNode) fromChildren.get(1);
-        assert valuesTable.getToken().getType() == HiveASTParser.TOK_VALUES_TABLE
-                : "Expected second child of TOK_VIRTUAL_TABLE to be TOK_VALUE_TABLE but was "
-                        + valuesTable.getName();
-        // Each of the children of TOK_VALUES_TABLE will be a TOK_VALUE_ROW
-        List<? extends Node> valuesTableChildren = valuesTable.getChildren();
-
-        // Now that we're going to start reading through the rows, open a file to write the rows too
-        // If we leave this method before creating the temporary table we need to be sure to clean
-        // up this file.
-        Path tablePath = null;
-        FileSystem fs = null;
-        FSDataOutputStream out = null;
+        Preconditions.checkArgument(
+                valuesTable.getToken().getType() == HiveASTParser.TOK_VALUES_TABLE,
+                "Expected second child of TOK_VIRTUAL_TABLE to be TOK_VALUE_TABLE but was "
+                        + valuesTable.getName());
+
+        // Pick a name for the table
+        SessionState ss = SessionState.get();
+        String tableName =
+                (VALUES_TMP_TABLE_NAME_PREFIX + ss.getNextValuesTempTableSuffix()).toLowerCase();
+
+        List<? extends Node> rows = valuesTable.getChildren();
+        List<List<String>> valuesData = new ArrayList<>(rows.size());
         try {
-            if (dataDir == null) {
-                tablePath = Warehouse.getDnsPath(new Path(ss.getTempTableSpace(), tableName), conf);
-            } else {
-                // if target table of insert is encrypted, make sure temporary table data is stored
-                // similarly encrypted
-                tablePath = Warehouse.getDnsPath(new Path(dataDir, tableName), conf);
-            }
-            fs = tablePath.getFileSystem(conf);
-            fs.mkdirs(tablePath);
-            Path dataFile = new Path(tablePath, "data_file");
-            out = fs.create(dataFile);
             List<FieldSchema> fields = new ArrayList<>();
 
             boolean firstRow = true;
-            for (Node n : valuesTableChildren) {
-                HiveParserASTNode valuesRow = (HiveParserASTNode) n;
-                assert valuesRow.getToken().getType() == HiveASTParser.TOK_VALUE_ROW
-                        : "Expected child of TOK_VALUE_TABLE to be TOK_VALUE_ROW but was "
-                                + valuesRow.getName();
+            for (Node n : rows) {
+                // Each of the children of TOK_VALUES_TABLE will be a TOK_VALUE_ROW
+                HiveParserASTNode row = (HiveParserASTNode) n;
+                Preconditions.checkArgument(
+                        row.getToken().getType() == HiveASTParser.TOK_VALUE_ROW,
+                        "Expected child of TOK_VALUE_TABLE to be TOK_VALUE_ROW but was "
+                                + row.getName());
                 // Each of the children of this should be a literal
-                List<? extends Node> valuesRowChildren = valuesRow.getChildren();
-                boolean isFirst = true;
+                List<? extends Node> columns = row.getChildren();
+                List<String> data = new ArrayList<>(columns.size());
                 int nextColNum = 1;
-                for (Node n1 : valuesRowChildren) {
-                    HiveParserASTNode value = (HiveParserASTNode) n1;
+                for (Node n1 : columns) {
+                    HiveParserASTNode column = (HiveParserASTNode) n1;
                     if (firstRow) {
                         fields.add(new FieldSchema("tmp_values_col" + nextColNum++, "string", ""));
                     }
-                    if (isFirst) {
-                        isFirst = false;
-                    } else {
-                        HiveParserUtils.writeAsText("\u0001", out);
-                    }
-                    HiveParserUtils.writeAsText(unparseExprForValuesClause(value), out);
+                    data.add(unparseExprForValuesClause(column));
                 }
-                HiveParserUtils.writeAsText("\n", out);
                 firstRow = false;
+                valuesData.add(data);
             }
 
             // Step 2, create a temp table, using the created file as the data
             Table table = db.newTable(tableName);
             table.setSerializationLib(conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE));
             HiveTableUtil.setStorageFormat(table.getSd(), "TextFile", conf);
             table.setFields(fields);
-            table.setDataLocation(tablePath);
+            // make up a path for this table
+            table.setDataLocation(
Review comment:
       Do we still need to call setDataLocation for this temp table, now that we store the temp table's data in memory?
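
       For context on why the location used to matter: the removed branch materialized the VALUES rows as a Hive TextFile (columns separated by `\u0001`, rows by `\n`) and pointed the temp table at that file via `setDataLocation`. Here is a minimal standalone sketch of that on-disk layout (my own illustration with hypothetical names like `ValuesTextFileSketch`/`writeRows`, not code from this PR):

       ```java
       import java.io.ByteArrayOutputStream;
       import java.io.IOException;
       import java.io.OutputStream;
       import java.nio.charset.StandardCharsets;
       import java.util.Arrays;
       import java.util.List;

       // Sketch of the serialization the removed code performed via
       // HiveParserUtils.writeAsText: Hive's default TextFile layout uses
       // \u0001 between columns and '\n' between rows, so the temp table's
       // data location had to point at a real file written in this format.
       public class ValuesTextFileSketch {

           static void writeRows(List<List<String>> rows, OutputStream out) throws IOException {
               for (List<String> row : rows) {
                   boolean first = true;
                   for (String col : row) {
                       if (!first) {
                           out.write("\u0001".getBytes(StandardCharsets.UTF_8));
                       }
                       first = false;
                       out.write(col.getBytes(StandardCharsets.UTF_8));
                   }
                   out.write('\n');
               }
           }

           public static void main(String[] args) throws IOException {
               ByteArrayOutputStream buf = new ByteArrayOutputStream();
               writeRows(Arrays.asList(Arrays.asList("1", "a"), Arrays.asList("2", "b")), buf);
               // Delimiters escaped for readability; prints: 1\u0001a\n2\u0001b\n
               System.out.println(
                       buf.toString(StandardCharsets.UTF_8.name())
                               .replace("\u0001", "\\u0001")
                               .replace("\n", "\\n"));
           }
       }
       ```

       With the rows now held in `valuesData` in memory, nothing is ever written to that path, so the "make up a path for this table" call presumably only satisfies the metastore's expectation that a table has a location rather than pointing at real data.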



