[ https://issues.apache.org/jira/browse/HIVE-24663?focusedWorklogId=600162&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-600162 ]

ASF GitHub Bot logged work on HIVE-24663:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 21/May/21 03:40
            Start Date: 21/May/21 03:40
    Worklog Time Spent: 10m 
      Work Description: maheshk114 commented on a change in pull request #2266:
URL: https://github.com/apache/hive/pull/2266#discussion_r636613658



##########
File path: standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
##########
@@ -5390,6 +5406,493 @@ public void countOpenTxns() throws MetaException {
     }
   }
 
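+  /**
+   * Deletes the old column statistics rows from PART_COL_STATS for every partition/column
+   * pair that is about to be rewritten. Deletes are collected into a JDBC batch that is
+   * flushed whenever it reaches DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE entries.
+   */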
+  private void cleanOldStatsFromPartColStatTable(Map<String, PartitionInfo> statsPartInfoMap,
+                                                 Map<String, ColumnStatistics> newStatsMap,
+                                                 Connection dbConn) throws SQLException {
+    PreparedStatement statementDelete = null;
+    int numRows = 0;
+    int maxNumRows = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE);
+    String delete = "DELETE FROM \"PART_COL_STATS\" where \"PART_ID\" = ? AND \"COLUMN_NAME\" = ?";
+
+    try {
+      statementDelete = dbConn.prepareStatement(delete);
+      for (Map.Entry entry : newStatsMap.entrySet()) {
+        // If the partition does not exist (deleted/removed by some other task), no need to update the stats.
+        if (!statsPartInfoMap.containsKey(entry.getKey())) {
+          continue;
+        }
+
+        ColumnStatistics colStats = (ColumnStatistics) entry.getValue();
+        for (ColumnStatisticsObj statisticsObj : colStats.getStatsObj()) {
+          statementDelete.setLong(1, statsPartInfoMap.get(entry.getKey()).partitionId);
+          statementDelete.setString(2, statisticsObj.getColName());
+          numRows++;
+          statementDelete.addBatch();
+          if (numRows == maxNumRows) {
+            statementDelete.executeBatch();
+            LOG.info("Executed delete " + delete + " for numRows " + numRows);
+            numRows = 0;
+          }
+        }
+      }
+
+      if (numRows != 0) {
+        statementDelete.executeBatch();
+      }
+    } finally {
+      closeStmt(statementDelete);
+    }
+  }
+
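+  /**
+   * Reads the next CS_ID from SEQUENCE_TABLE for MPartitionColumnStatistics, locking the
+   * row with FOR UPDATE so that concurrent writers do not hand out the same id range. If
+   * the sequence row does not exist yet, it is created and the read is retried.
+   */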
+  private long getMaxCSId(Connection dbConn) throws SQLException {
+    Statement stmtInt = null;
+    ResultSet rsInt = null;
+    long maxCsId = 0;
+    try {
+      stmtInt = dbConn.createStatement();
+      while (maxCsId == 0) {
+        String query = "SELECT \"NEXT_VAL\" FROM \"SEQUENCE_TABLE\" WHERE 
\"SEQUENCE_NAME\"= "
+                + 
quoteString("org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics")
+                + " FOR UPDATE";
+        rsInt = stmtInt.executeQuery(query);
+        LOG.debug("Going to execute query " + query);
+        if (rsInt.next()) {
+          maxCsId = rsInt.getLong(1);
+        } else {
+          query = "INSERT INTO \"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\", 
\"NEXT_VAL\")  VALUES ( "
+                  + 
quoteString("org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics")
 + "," + 1
+                  + ")";
+          stmtInt.executeUpdate(query);
+        }
+      }
+      return maxCsId;
+    } finally {
+      close(rsInt, stmtInt, null);
+    }
+  }
+
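+  /**
+   * Writes the next free CS_ID back to SEQUENCE_TABLE once the ids consumed by this batch
+   * of inserts are known.
+   */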
+  private void updateMaxCSId(Connection dbConn, long maxCSId) throws SQLException {
+    Statement stmtInt = null;
+    try {
+      stmtInt = dbConn.createStatement();
+      String query = "UPDATE \"SEQUENCE_TABLE\" SET \"NEXT_VAL\" = "
+              + maxCSId
+              + " WHERE \"SEQUENCE_NAME\" = "
+              + 
quoteString("org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics");
+      stmtInt.executeUpdate(query);
+      LOG.debug("Going to execute update " + query);
+    } finally {
+      closeStmt(stmtInt);
+    }
+  }
+
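+  /**
+   * Inserts the new column statistics rows into PART_COL_STATS in JDBC batches, assigning
+   * each row a CS_ID starting from the value reserved via getMaxCSId(). Partitions that
+   * disappeared in the meantime are skipped; the consumed id range is published back with
+   * updateMaxCSId().
+   */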
+  private void insertIntoPartColStatTable(Map<String, PartitionInfo> statsPartInfoMap,
+                                          Map<String, ColumnStatistics> newStatsMap,
+                                          Connection dbConn) throws SQLException, MetaException, NoSuchObjectException {
+    PreparedStatement statement = null;
+    long maxCsId = getMaxCSId(dbConn);
+
+    try {
+      int numRows = 0;
+      int maxNumRows = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE);
+
+      String insert = "INSERT INTO \"PART_COL_STATS\" (\"CS_ID\", \"CAT_NAME\", \"DB_NAME\","
+              + "\"TABLE_NAME\", \"PARTITION_NAME\", \"COLUMN_NAME\", \"COLUMN_TYPE\", \"PART_ID\","
+              + " \"LONG_LOW_VALUE\", \"LONG_HIGH_VALUE\", \"DOUBLE_HIGH_VALUE\", \"DOUBLE_LOW_VALUE\","
+              + " \"BIG_DECIMAL_LOW_VALUE\", \"BIG_DECIMAL_HIGH_VALUE\", \"NUM_NULLS\", \"NUM_DISTINCTS\", \"BIT_VECTOR\" ,"
+              + " \"AVG_COL_LEN\", \"MAX_COL_LEN\", \"NUM_TRUES\", \"NUM_FALSES\", \"LAST_ANALYZED\", \"ENGINE\") values "
+              + "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
+      statement = dbConn.prepareStatement(insert);
+
+      for (Map.Entry entry : newStatsMap.entrySet()) {
+        // If the partition does not exist (deleted/removed by some other task), no need to update the stats.
+        if (!statsPartInfoMap.containsKey(entry.getKey())) {
+          continue;
+        }
+
+        ColumnStatistics colStats = (ColumnStatistics) entry.getValue();
+        ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
+        long partId = statsPartInfoMap.get(entry.getKey()).partitionId;
+
+        for (ColumnStatisticsObj statisticsObj : colStats.getStatsObj()) {
+          MPartitionColumnStatistics mPartitionColumnStatistics = StatObjectConverter.
+                  convertToMPartitionColumnStatistics(null, statsDesc, statisticsObj, colStats.getEngine());
+
+          statement.setLong(1, maxCsId);
+          statement.setString(2, mPartitionColumnStatistics.getCatName());
+          statement.setString(3, mPartitionColumnStatistics.getDbName());
+          statement.setString(4, mPartitionColumnStatistics.getTableName());
+          statement.setString(5, mPartitionColumnStatistics.getPartitionName());
+          statement.setString(6, mPartitionColumnStatistics.getColName());
+          statement.setString(7, mPartitionColumnStatistics.getColType());
+          statement.setLong(8, partId);
+          statement.setObject(9, mPartitionColumnStatistics.getLongLowValue());
+          statement.setObject(10, mPartitionColumnStatistics.getLongHighValue());
+          statement.setObject(11, mPartitionColumnStatistics.getDoubleHighValue());
+          statement.setObject(12, mPartitionColumnStatistics.getDoubleLowValue());
+          statement.setString(13, mPartitionColumnStatistics.getDecimalLowValue());
+          statement.setString(14, mPartitionColumnStatistics.getDecimalHighValue());
+          statement.setObject(15, mPartitionColumnStatistics.getNumNulls());
+          statement.setObject(16, mPartitionColumnStatistics.getNumDVs());
+          statement.setObject(17, mPartitionColumnStatistics.getBitVector());
+          statement.setObject(18, mPartitionColumnStatistics.getAvgColLen());
+          statement.setObject(19, mPartitionColumnStatistics.getMaxColLen());
+          statement.setObject(20, mPartitionColumnStatistics.getNumTrues());
+          statement.setObject(21, mPartitionColumnStatistics.getNumFalses());
+          statement.setLong(22, mPartitionColumnStatistics.getLastAnalyzed());
+          statement.setString(23, mPartitionColumnStatistics.getEngine());
+
+          maxCsId++;
+          numRows++;
+          statement.addBatch();
+          if (numRows == maxNumRows) {
+            statement.executeBatch();
+            numRows = 0;
+          }
+        }
+      }
+
+      if (numRows != 0) {
+        statement.executeBatch();
+      }
+      updateMaxCSId(dbConn, maxCsId);
+    } finally {
+      closeStmt(statement);
+    }
+  }
+
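+  /**
+   * Fetches the current COLUMN_STATS_ACCURATE value from PARTITION_PARAMS for each of the
+   * given partition ids, batching the id list into bounded IN clauses.
+   */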
+  private Map<Long, String> getParamValues(Connection dbConn, List<Long> partIdList) throws SQLException {
+    List<String> queries = new ArrayList<>();
+    StringBuilder prefix = new StringBuilder();
+    StringBuilder suffix = new StringBuilder();
+    PreparedStatement pStmt = null;
+    ResultSet rs = null;
+
+    prefix.append("select \"PART_ID\", \"PARAM_VALUE\" "
+            + " from \"PARTITION_PARAMS\" where "
+            + " \"PARAM_KEY\" = 'COLUMN_STATS_ACCURATE' "
+            + " and ");
+    TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix,
+            partIdList, "\"PART_ID\"", false, false);
+
+    List<String> params = Collections.emptyList();
+    Map<Long, String> partIdToParaMap = new HashMap<>();
+
+    try {
+      for (String query : queries) {
+        pStmt = sqlGenerator.prepareStmtWithParameters(dbConn, query, params);
+        LOG.debug("Going to execute query " + query);
+        rs = pStmt.executeQuery();
+        while (rs.next()) {
+          partIdToParaMap.put(rs.getLong(1), rs.getString(2));
+        }
+      }
+      return partIdToParaMap;
+    } finally {
+      close(rs, pStmt, null);
+    }
+  }
+
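+  /**
+   * Stamps the given write id on the PARTITIONS rows of all updated partitions, batching
+   * the PART_ID list into bounded IN clauses.
+   */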
+  private void updateWriteIdForPartitions(Connection dbConn, long writeId, List<Long> partIdList) throws SQLException {
+    StringBuilder prefix = new StringBuilder();
+    List<String> queries = new ArrayList<>();
+    StringBuilder suffix = new StringBuilder();
+    prefix.append("UPDATE \"PARTITIONS\" set \"WRITE_ID\" = " + writeId + " where ");
+    TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix,
+            partIdList, "\"PART_ID\"", false, false);
+
+    List<String> params = Collections.emptyList();
+    PreparedStatement pStmt = null;
+    try {
+      for (String query : queries) {
+        pStmt = sqlGenerator.prepareStmtWithParameters(dbConn, query, params);
+        LOG.debug("Going to execute query " + query);
+        pStmt.executeUpdate();
+      }
+    } finally {
+      closeStmt(pStmt);
+    }
+  }
+
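+  /**
+   * Refreshes the COLUMN_STATS_ACCURATE parameter in PARTITION_PARAMS for each updated
+   * partition: inserts it where missing, updates it where the existing stats remain valid,
+   * and deletes it for ACID tables when transactional stats are disabled or the old stats
+   * are no longer valid for the writer. Returns the new parameter map per partition name.
+   */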
+  private Map<String, Map<String, String>> updatePartitionParamTable(Connection dbConn,
+                                                                     Map<String, PartitionInfo> partitionInfoMap,
+                                                                     Map<String, ColumnStatistics> partColStatsMap,
+                                                                     List<Long> partIdList,
+                                                                     String validWriteIds,
+                                                                     long writeId,
+                                                                     boolean isAcidTable)
+          throws SQLException, MetaException {
+    Map<String, Map<String, String>> result = new HashMap<>();
+
+    LOG.info("ETL_PERF started getParamValues ");
+    Map<Long, String> partIdToParaMap = getParamValues(dbConn, partIdList);
+    LOG.info("ETL_PERF done getParamValues ");
+
+    String insert = "INSERT INTO \"PARTITION_PARAMS\" (\"PART_ID\", 
\"PARAM_KEY\", \"PARAM_VALUE\") "
+            + "VALUES( ? , 'COLUMN_STATS_ACCURATE'  , ? )";
+    PreparedStatement statementInsert = dbConn.prepareStatement(insert);
+    int numInsert = 0;
+
+    String delete = "DELETE from \"PARTITION_PARAMS\" "
+            + " where \"PART_ID\" = ? "
+            + " and \"PARTITION_PARAMS\".\"PARAM_KEY\" = 
'COLUMN_STATS_ACCURATE'";
+    PreparedStatement statementDelete = dbConn.prepareStatement(delete);
+    int numDelete = 0;
+
+    String update = "UPDATE \"PARTITION_PARAMS\" set \"PARAM_VALUE\" = ? "
+            + " where \"PART_ID\" = ? "
+            + " and \"PARTITION_PARAMS\".\"PARAM_KEY\" = 
'COLUMN_STATS_ACCURATE'";
+    PreparedStatement statementUpdate = dbConn.prepareStatement(update);
+    int numUpdate = 0;
+
+    boolean areTxnStatsSupported = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_TXN_STATS_ENABLED);
+    int maxNumRows = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE);
+
+    try {
+      for (Map.Entry entry : partColStatsMap.entrySet()) {
+        if (!partitionInfoMap.containsKey(entry.getKey())) {
+          // Partition is dropped or removed by some concurrent thread.
+          continue;
+        }
+
+        ColumnStatistics colStats = (ColumnStatistics) entry.getValue();
+        List<String> colNames = colStats.getStatsObj().stream().map(e -> e.getColName()).collect(Collectors.toList());
+        long partWriteId = partitionInfoMap.get(entry.getKey()).writeId;
+        long partId = partitionInfoMap.get(entry.getKey()).partitionId;
+        Map<String, String> newParameter;
+
+        if (!partIdToParaMap.containsKey(partId)) {
+          newParameter = new HashMap<>();
+          newParameter.put(COLUMN_STATS_ACCURATE, "TRUE");
+          StatsSetupConst.setColumnStatsState(newParameter, colNames);
+          statementInsert.setLong(1, partId);
+          statementInsert.setString(2, newParameter.get(COLUMN_STATS_ACCURATE));
+          numInsert++;
+          statementInsert.addBatch();
+          if (numInsert == maxNumRows) {
+            statementInsert.executeBatch();
+            numInsert = 0;
+          }
+          LOG.debug(" Executing insert " + insert);
+        } else {
+          String oldStats = partIdToParaMap.get(partId);
+
+          Map<String, String> oldParameter = new HashMap<>();
+          oldParameter.put(COLUMN_STATS_ACCURATE, oldStats);
+
+          newParameter = new HashMap<>();
+          newParameter.put(COLUMN_STATS_ACCURATE, oldStats);
+          StatsSetupConst.setColumnStatsState(newParameter, colNames);
+
+          if (isAcidTable) {
+            String errorMsg = ObjectStore.verifyStatsChangeCtx(
+                    colStats.getStatsDesc().getDbName() + "." + colStats.getStatsDesc().getTableName(),
+                    oldParameter, newParameter, writeId, validWriteIds, true);
+            if (errorMsg != null) {
+              throw new MetaException(errorMsg);
+            }
+          }
+
+          if (isAcidTable &&
+                  (!areTxnStatsSupported || !ObjectStore.isCurrentStatsValidForTheQuery(oldParameter, partWriteId,
+                          validWriteIds, true))) {
+            statementDelete.setLong(1, partId);
+            statementDelete.addBatch();
+            numDelete++;
+            if (numDelete == maxNumRows) {
+              statementDelete.executeBatch();
+              numDelete = 0;
+            }
+            LOG.debug("Removed COLUMN_STATS_ACCURATE from the parameters of 
the partition "
+                    + colStats.getStatsDesc().getDbName() + "." + 
colStats.getStatsDesc().getTableName() + "."
+                    + colStats.getStatsDesc().getPartName());
+          } else {
+            statementUpdate.setString(1, newParameter.get(COLUMN_STATS_ACCURATE));
+            statementUpdate.setLong(2, partId);
+            statementUpdate.addBatch();
+            numUpdate++;
+            if (numUpdate == maxNumRows) {
+              statementUpdate.executeBatch();
+              numUpdate = 0;
+            }
+            LOG.debug(" Executing update " + statementUpdate);
+          }
+        }
+        result.put((String) entry.getKey(), newParameter);
+      }
+
+      if (numInsert != 0) {
+        statementInsert.executeBatch();
+      }
+
+      if (numUpdate != 0) {
+        statementUpdate.executeBatch();
+      }
+
+      if (numDelete != 0) {
+        statementDelete.executeBatch();
+      }
+
+      if (isAcidTable) {
+        LOG.info("ETL_PERF started updateWriteIdForPartitions ");
+        updateWriteIdForPartitions(dbConn, writeId, partIdList);
+        LOG.info("ETL_PERF done updateWriteIdForPartitions ");
+      }
+      return result;
+    } finally {
+      closeStmt(statementInsert);
+      closeStmt(statementUpdate);
+      closeStmt(statementDelete);
+    }
+  }
+
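+  /** Holds the PART_ID and WRITE_ID fetched from the PARTITIONS table for one partition name. */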
+  private static class PartitionInfo {
+    long partitionId;
+    long writeId;
+    public PartitionInfo(long partitionId, long writeId) {
+      this.partitionId = partitionId;
+      this.writeId = writeId;
+    }
+  }
+
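+  /**
+   * Resolves the table id for the given catalog/db/table and maps each partition name to
+   * its PART_ID and WRITE_ID, batching the partition names into bounded IN clauses.
+   */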
+  private Map<String, PartitionInfo> getPartitionInfo(Connection dbConn, String catName,
+                                                      String dbName, String tblName,
+                                                      List<String> partKeys) throws SQLException {
+    List<String> queries = new ArrayList<>();
+    StringBuilder prefix = new StringBuilder();
+    StringBuilder suffix = new StringBuilder();
+    Statement stmt = null;
+    PreparedStatement pStmt = null;
+    ResultSet rs = null;
+    Map<String, PartitionInfo> partitionInfoMap = new HashMap<>();
+
+    try {
+      long tblId;
+      stmt = dbConn.createStatement();
+      rs = stmt.executeQuery("select \"TBL_ID\" from \"DBS\", \"TBLS\" where 
\"DBS\".\"NAME\" = "
+              + quoteString(dbName) + " and \"DBS\".\"CTLG_NAME\" = " + 
quoteString(catName)
+              + " and \"TBLS\".\"TBL_NAME\" = " + quoteString(tblName)
+              + " and \"DBS\".\"DB_ID\" = \"TBLS\".\"DB_ID\"");
+      if (rs.next()) {
+        tblId = rs.getLong(1);
+      } else {
+        throw new RuntimeException("Invalid table name" + catName + "." + 
dbName + "." + tblName);
+      }
+
+      prefix.append("select \"PARTITIONS\".\"PART_ID\", 
\"PARTITIONS\".\"WRITE_ID\", \"PARTITIONS\".\"PART_NAME\" "
+              + " from \"PARTITIONS\" where ");
+      suffix.append(" and \"TBL_ID\" = " + tblId);
+      TxnUtils.buildQueryWithINClauseStrings(conf, queries, prefix, suffix,
+              partKeys, "\"PARTITIONS\".\"PART_NAME\"", false, false);
+
+      List<String> params = new ArrayList<>();
+      for (String query : queries) {
+        pStmt = sqlGenerator.prepareStmtWithParameters(dbConn, query, params);
+        LOG.debug("Going to execute query <" + query + ">");
+        rs = pStmt.executeQuery();
+        while (rs.next()) {
+          PartitionInfo partitionInfo = new PartitionInfo(rs.getLong(1), rs.getLong(2));
+          partitionInfoMap.put(rs.getString(3), partitionInfo);
+        }
+      }
+    } finally {
+      close(rs, pStmt, null);
+      closeStmt(stmt);
+    }
+    return partitionInfoMap;
+  }
+
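+  /**
+   * Updates the column statistics for a set of partitions directly over JDBC instead of
+   * through DataNucleus: resolves the PART_IDs, replaces the PART_COL_STATS rows, and
+   * refreshes PARTITION_PARAMS (plus the partition write ids for ACID tables).
+   */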
+  @Override
+  public boolean updatePartitionColumnStatistics(Map<String, ColumnStatistics> partColStatsMap,
+                                                 IHMSHandler handler,
+                                                 List<MetaStoreEventListener> listeners,
+                                                 Table tbl,
+                                                 String validWriteIds, long writeId) throws MetaException {
+    Connection dbConn = null;
+    String catName = tbl.getCatName();
+    String dbName = tbl.getDbName();
+    String tableName = tbl.getTableName();
+    boolean isAcidTable = TxnUtils.isAcidTable(tbl);
+
+    LOG.info("ETL_PERF started updatePartitionColumnStatistics");

Review comment:
       I had added those logs for my perf tracking; they are not required, so I will remove them.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Issue Time Tracking
-------------------

    Worklog Id:     (was: 600162)
    Time Spent: 1h  (was: 50m)

> Batch process in ColStatsProcessor for partitions.
> --------------------------------------------------
>
>                 Key: HIVE-24663
>                 URL: https://issues.apache.org/jira/browse/HIVE-24663
>             Project: Hive
>          Issue Type: Improvement
>            Reporter: Rajesh Balamohan
>            Assignee: mahesh kumar behera
>            Priority: Major
>              Labels: performance, pull-request-available
>          Time Spent: 1h
>  Remaining Estimate: 0h
>
> When a large number of partitions (>20K) is processed, ColStatsProcessor runs
> into DB issues: {{ db.setPartitionColumnStatistics(request);}} gets stuck for
> hours, and in some cases Postgres stops processing altogether.
> It would be good to introduce small batches for stats gathering in
> ColStatsProcessor instead of a single bulk update.
> Ref:
> https://github.com/apache/hive/blob/master/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java#L181
> https://github.com/apache/hive/blob/master/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java#L199
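
A minimal sketch of the batching idea described above, for illustration only
(BATCH_SIZE is a made-up constant, and the accessors are assumed to be the
Thrift-generated SetPartitionsStatsRequest bean methods; this is not code from
the patch):

  // Hypothetical: split the single bulk metastore call into bounded batches
  // so that each setPartitionColumnStatistics call stays small.
  final int BATCH_SIZE = 1000;
  List<ColumnStatistics> colStats = request.getColStats();
  for (int start = 0; start < colStats.size(); start += BATCH_SIZE) {
    int end = Math.min(start + BATCH_SIZE, colStats.size());
    SetPartitionsStatsRequest batchRequest = new SetPartitionsStatsRequest();
    batchRequest.setColStats(new ArrayList<>(colStats.subList(start, end)));
    batchRequest.setNeedMerge(request.isNeedMerge());
    batchRequest.setWriteId(request.getWriteId());
    batchRequest.setValidWriteIdList(request.getValidWriteIdList());
    db.setPartitionColumnStatistics(batchRequest);
  }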



--
This message was sent by Atlassian Jira
(v8.3.4#803005)
