[ https://issues.apache.org/jira/browse/HIVE-24596?focusedWorklogId=590188&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-590188 ]
ASF GitHub Bot logged work on HIVE-24596:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 28/Apr/21 07:36
            Start Date: 28/Apr/21 07:36
    Worklog Time Spent: 10m
      Work Description: rbalamohan commented on a change in pull request #2033:
URL: https://github.com/apache/hive/pull/2033#discussion_r621894541


##########
File path: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLPlanUtils.java
##########
@@ -0,0 +1,1005 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.DateColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.SkewedInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
+import org.apache.hadoop.hive.ql.ddl.ShowUtils;
+import org.apache.hadoop.hive.ql.ddl.table.create.CreateTableOperation;
+import org.apache.hadoop.hive.ql.metadata.CheckConstraint;
+import org.apache.hadoop.hive.ql.metadata.CheckConstraint.CheckConstraintCol;
+import org.apache.hadoop.hive.ql.metadata.DefaultConstraint;
+import org.apache.hadoop.hive.ql.metadata.DefaultConstraint.DefaultConstraintCol;
+import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.NotNullConstraint;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.PrimaryKeyInfo;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.metadata.UniqueConstraint;
+import org.apache.hadoop.hive.ql.util.DirectionUtils;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.hive.serde2.typeinfo.UnionTypeInfo;
+import org.apache.hive.common.util.HiveStringUtils;
+import org.stringtemplate.v4.ST;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
+
+public class DDLPlanUtils {
+  private static final String EXTERNAL = "external";
+  private static final String TEMPORARY = "temporary";
+  private static final String LIST_COLUMNS = "columns";
+  private static final String COMMENT = "comment";
+  private static final String PARTITIONS = "partitions";
+  private static final String BUCKETS = "buckets";
+  private static final String SKEWED = "skewedinfo";
+  private static final String ROW_FORMAT = "row_format";
+  private static final String LOCATION_BLOCK = "location_block";
+  private static final String LOCATION = "location";
+  private static final String PROPERTIES = "properties";
+  private static final String TABLE_NAME = "TABLE_NAME";
+  private static final String DATABASE_NAME = "DATABASE_NAME";
+  private static final String DATABASE_NAME_FR = "DATABASE_NAME_FR";
+  private static final String PARTITION = "PARTITION";
+  private static final String COLUMN_NAME = "COLUMN_NAME";
+  private static final String TBLPROPERTIES = "TABLE_PROPERTIES";
+  private static final String PARTITION_NAME = "PARTITION_NAME";
+  private static final String CONSTRAINT_NAME = "CONSTRAINT_NAME";
+  private static final String COL_NAMES = "COLUMN_NAMES";
+  private static final String CHILD_TABLE_NAME = "CHILD_TABLE_NAME";
+  private static final String PARENT_TABLE_NAME = "PARENT_TABLE_NAME";
+  private static final String CHILD_COL_NAME = "CHILD_COL_NAME";
+  private static final String PARENT_COL_NAME = "PARENT_COL_NAME";
+  private static final String CHECK_EXPRESSION = "CHECK_EXPRESSION";
+  private static final String DEFAULT_VALUE = "DEFAULT_VALUE";
+  private static final String COL_TYPE = "COL_TYPE";
+  private static final String SQL = "SQL";
+  private static final String COMMENT_SQL = "COMMENT_SQL";
+  private static final String HIVE_DEFAULT_PARTITION = "__HIVE_DEFAULT_PARTITION__";
+  private static final String BASE_64_VALUE = "BASE_64";
+  private static final String numNulls = "'numNulls'='";
+  private static final String numDVs = "'numDVs'='";
+  private static final String numTrues = "'numTrues'='";
+  private static final String numFalses = "'numFalses'='";
+  private static final String lowValue = "'lowValue'='";
+  private static final String highValue = "'highValue'='";
+  private static final String avgColLen = "'avgColLen'='";
+  private static final String maxColLen = "'maxColLen'='";
+  private static final String[] req = {"numRows", "rawDataSize"};
+  private static final String[] explain_plans = {"EXPLAIN", "EXPLAIN CBO", "EXPLAIN VECTORIZED"};
+
+  private static final String CREATE_DATABASE_STMT = "CREATE DATABASE IF NOT EXISTS <" + DATABASE_NAME + ">;";
+
+  private final String CREATE_TABLE_TEMPLATE =
+      "CREATE <" + TEMPORARY + "><" + EXTERNAL + ">TABLE <if(" + DATABASE_NAME + ")>`<" + DATABASE_NAME + ">`.<endif>" +
+      "`<" + TABLE_NAME + ">`(\n" +
+      "<" + LIST_COLUMNS + ">)\n" +
+      "<" + COMMENT + ">\n" +
+      "<" + PARTITIONS + ">\n" +
+      "<" + BUCKETS + ">\n" +
+      "<" + SKEWED + ">\n" +
+      "<" + ROW_FORMAT + ">\n" +
+      "<" + LOCATION_BLOCK + ">" +
+      "TBLPROPERTIES (\n" +
+      "<" + PROPERTIES + ">)";
+
+  private static final String CREATE_VIEW_TEMPLATE =
+      "CREATE VIEW <if(" + DATABASE_NAME + ")>`<" + DATABASE_NAME + ">`.<endif>`<" + TABLE_NAME +
+      ">`<" + PARTITIONS + "> AS <" + SQL + ">";
+
+  private final String CREATE_TABLE_TEMPLATE_LOCATION = "LOCATION\n" +
+      "<" + LOCATION + ">\n";
+
+  private final Set<String> PROPERTIES_TO_IGNORE_AT_TBLPROPERTIES = Sets.union(
+      ImmutableSet.of("TEMPORARY", "EXTERNAL", "comment", "SORTBUCKETCOLSPREFIX", META_TABLE_STORAGE),
+      new HashSet<String>(StatsSetupConst.TABLE_PARAMS_STATS_KEYS));
+
+  private final String ALTER_TABLE_CREATE_PARTITION = "<if(" + COMMENT_SQL + ")><" + COMMENT_SQL + "> <endif>" +
+      "ALTER TABLE <" + DATABASE_NAME + ">.<" + TABLE_NAME +
+      "> ADD IF NOT EXISTS PARTITION (<" + PARTITION + ">);";
+
+  private final String ALTER_TABLE_UPDATE_STATISTICS_TABLE_COLUMN = "ALTER TABLE <" +
+      DATABASE_NAME + ">.<" + TABLE_NAME + "> UPDATE STATISTICS FOR COLUMN <" +
+      COLUMN_NAME + "> SET(<" + TBLPROPERTIES + "> );";
+
+  private final String ALTER_TABLE_UPDATE_STATISTICS_PARTITION_COLUMN = "<if(" + COMMENT_SQL + ")><" + COMMENT_SQL + "> <endif>" +
+      "ALTER TABLE <" + DATABASE_NAME + ">.<" + TABLE_NAME +
+      "> PARTITION (<" + PARTITION_NAME +
+      ">) UPDATE STATISTICS FOR COLUMN <" +
+      COLUMN_NAME + "> SET(<" + TBLPROPERTIES + "> );";
+
+  private final String ALTER_TABLE_UPDATE_STATISTICS_TABLE_BASIC = "ALTER TABLE <" +
+      DATABASE_NAME + ">.<" + TABLE_NAME +
+      "> UPDATE STATISTICS SET(<" + TBLPROPERTIES + "> );";
+
+  private final String ALTER_TABLE_UPDATE_STATISTICS_PARTITION_BASIC = "<if(" + COMMENT_SQL + ")><" + COMMENT_SQL + "> <endif>" +
+      "ALTER TABLE <" + DATABASE_NAME + ">.<" + TABLE_NAME + "> PARTITION (<" +
+      PARTITION_NAME + ">) UPDATE STATISTICS SET(<" + TBLPROPERTIES + "> );";
+
+  private final String ALTER_TABLE_ADD_PRIMARY_KEY = "ALTER TABLE <" +
+      DATABASE_NAME + ">.<" + TABLE_NAME + "> ADD CONSTRAINT <" +
+      CONSTRAINT_NAME + "> PRIMARY KEY (<" + COL_NAMES + ">) DISABLE NOVALIDATE;";
+
+  private final String ALTER_TABLE_ADD_FOREIGN_KEY = "ALTER TABLE <" +
+      DATABASE_NAME + ">.<" + CHILD_TABLE_NAME + "> ADD CONSTRAINT <" +
+      CONSTRAINT_NAME + "> FOREIGN KEY (<" + CHILD_COL_NAME + ">) REFERENCES <" +
+      DATABASE_NAME_FR + ">.<" + PARENT_TABLE_NAME + ">(<" + PARENT_COL_NAME + ">) DISABLE NOVALIDATE RELY;";
+
+  private final String ALTER_TABLE_ADD_UNIQUE_CONSTRAINT = "ALTER TABLE <" +
+      DATABASE_NAME + ">.<" + TABLE_NAME + "> ADD CONSTRAINT <" +
+      CONSTRAINT_NAME + "> UNIQUE (<" + COLUMN_NAME + ">) DISABLE NOVALIDATE;";
+
+  private final String ALTER_TABLE_ADD_CHECK_CONSTRAINT = "ALTER TABLE <" +
+      DATABASE_NAME + ">.<" + TABLE_NAME +
+      "> ADD CONSTRAINT <" + CONSTRAINT_NAME + "> CHECK (<" +
+      CHECK_EXPRESSION + ">) DISABLE;";
+
+  private final String ALTER_TABLE_ADD_NOT_NULL_CONSTRAINT = "ALTER TABLE <" +
+      DATABASE_NAME + ">.<" + TABLE_NAME + "> CHANGE COLUMN <" +
+      COLUMN_NAME + "> <" + COLUMN_NAME +
+      "> <" + COL_TYPE + "> CONSTRAINT <" + CONSTRAINT_NAME + "> NOT NULL DISABLE;";
+
+  private final String ALTER_TABLE_ADD_DEFAULT_CONSTRAINT = "ALTER TABLE <" +
+      DATABASE_NAME + ">.<" + TABLE_NAME + "> CHANGE COLUMN <" +
+      COLUMN_NAME + "> <" + COLUMN_NAME +
+      "> <" + COL_TYPE + "> CONSTRAINT <" + CONSTRAINT_NAME + "> DEFAULT <" + DEFAULT_VALUE + "> DISABLE;";
+
+  private final String EXIST_BIT_VECTORS = "-- BIT VECTORS PRESENT FOR <" + DATABASE_NAME + ">.<" + TABLE_NAME + "> " +
+      "FOR COLUMN <" + COLUMN_NAME + "> BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS <" +
+      BASE_64_VALUE + "> ";
+
+  private final String EXIST_BIT_VECTORS_PARTITIONED = "-- BIT VECTORS PRESENT FOR <" + DATABASE_NAME + ">.<" +
+      TABLE_NAME + "> PARTITION <" + PARTITION_NAME + "> FOR COLUMN <" +
+      COLUMN_NAME + "> BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS <" +
+      BASE_64_VALUE + "> ";
+
+  /**
+   * Returns the CREATE DATABASE statements for the given database names.
+   *
+   * @param dbNames
+   * @return
+   */
+  public List<String> getCreateDatabaseStmt(Set<String> dbNames) {
+    List<String> allDbStmt = new ArrayList<String>();
+    for (String dbName : dbNames) {
+      if (dbName.equals("default")) {
+        continue;
+      }
+      ST command = new ST(CREATE_DATABASE_STMT);
+      command.add(DATABASE_NAME, dbName);
+      allDbStmt.add(command.render());
+    }
+    return allDbStmt;
+  }
+
+  public Map<String, String> getTableColumnsToType(Table tbl) {
+    List<FieldSchema> fieldSchemas = tbl.getAllCols();
+    Map<String, String> ret = new HashMap<String, String>();
+    fieldSchemas.stream().forEach(f -> ret.put(f.getName(), f.getType()));
+    return ret;
+  }
+
+  public List<String> getTableColumnNames(Table tbl) {
+    List<FieldSchema> fieldSchemas = tbl.getAllCols();
+    List<String> ret = new ArrayList<String>();
+    fieldSchemas.stream().forEach(f -> ret.add(f.getName()));
+    return ret;
+  }
+
+  public String getPartitionActualName(Partition pt) {
+    Map<String, String> colTypeMap = getTableColumnsToType(pt.getTable());
+    String[] partColsDef = pt.getName().split(",");
+    List<String> ptParam = new ArrayList<>();
+    for (String partCol : partColsDef) {
+      String[] colValue = partCol.split("=");
+      if (colTypeMap.get(colValue[0]).equalsIgnoreCase("string")) {
+        ptParam.add(colValue[0] + "='" + colValue[1] + "'");
+      } else {
+        ptParam.add(colValue[0] + "=" + colValue[1]);
+      }
+    }
+    return StringUtils.join(ptParam, ",");
+  }
+
+  public boolean checkIfDefaultPartition(String pt) {
+    return pt.contains(HIVE_DEFAULT_PARTITION);
+  }
+
+  /**
+   * Creates the alter table command to add a partition to the given table.
+   *
+   * @param pt
+   * @return
+   * @throws MetaException
+   */
+  public String getAlterTableAddPartition(Partition pt) throws MetaException {
+    Table tb = pt.getTable();
+    ST command = new ST(ALTER_TABLE_CREATE_PARTITION);
+    command.add(DATABASE_NAME, tb.getDbName());
+    command.add(TABLE_NAME, tb.getTableName());
+    command.add(PARTITION, getPartitionActualName(pt));
+    if (checkIfDefaultPartition(pt.getName())) {
+      command.add(COMMENT_SQL, "--");
+    }
+    return command.render();
+  }
+
+  public void addLongStats(ColumnStatisticsData cd, List<String> ls) {
+    if (!cd.isSetLongStats()) {
+      return;
+    }
+    LongColumnStatsData lg = cd.getLongStats();
+    ls.add(lowValue + lg.getLowValue() + "'");
+    ls.add(highValue + lg.getHighValue() + "'");
+    ls.add(numNulls + lg.getNumNulls() + "'");
+    ls.add(numDVs + lg.getNumDVs() + "'");
+  }
+
+  public void addBooleanStats(ColumnStatisticsData cd, List<String> ls) {
+    if (!cd.isSetBooleanStats()) {
+      return;
+    }
+    BooleanColumnStatsData bd = cd.getBooleanStats();
+    ls.add(numTrues + bd.getNumTrues() + "'");
+    ls.add(numFalses + bd.getNumFalses() + "'");
+    ls.add(numNulls + bd.getNumNulls() + "'");
+  }
+
+  public void addStringStats(ColumnStatisticsData cd, List<String> ls) {
+    if (!cd.isSetStringStats()) {
+      return;
+    }
+    StringColumnStatsData lg = cd.getStringStats();
+    ls.add(avgColLen + lg.getAvgColLen() + "'");
+    ls.add(maxColLen + lg.getMaxColLen() + "'");
+    ls.add(numNulls + lg.getNumNulls() + "'");
+    ls.add(numDVs + lg.getNumDVs() + "'");
+  }
+
+  public void addDateStats(ColumnStatisticsData cd, List<String> ls) {
+    if (!cd.isSetDateStats()) {
+      return;
+    }
+    DateColumnStatsData dt = cd.getDateStats();
+    ls.add(lowValue + dt.getLowValue().getDaysSinceEpoch() + "'");
+    ls.add(highValue + dt.getHighValue().getDaysSinceEpoch() + "'");
+    ls.add(numNulls + dt.getNumNulls() + "'");
+    ls.add(numDVs + dt.getNumDVs() + "'");
+  }
+
+  public void addBinaryStats(ColumnStatisticsData cd, List<String> ls) {
+    if (!cd.isSetBinaryStats()) {
+      return;
+    }
+    BinaryColumnStatsData bd = cd.getBinaryStats();
+    ls.add(avgColLen + bd.getAvgColLen() + "'");
+    ls.add(maxColLen + bd.getMaxColLen() + "'");
+    ls.add(numNulls + bd.getNumNulls() + "'");
+  }
+
+  // Pads the big-endian unscaled bytes of a decimal to at least 8 bytes so that
+  // ByteBuffer#getLong can read them. The bytes are right-aligned (zero-padded in
+  // front) to preserve the magnitude; note that negative unscaled values would
+  // additionally need sign extension.
+  public byte[] setByteArrayToLongSize(byte[] arr) {
+    byte[] temp = new byte[Math.max(arr.length, 8)];
+    System.arraycopy(arr, 0, temp, temp.length - arr.length, arr.length);
+    return temp;
+  }
+
+  public void addDecimalStats(ColumnStatisticsData cd, List<String> ls) {
+    if (!cd.isSetDecimalStats()) {
+      return;
+    }
+    DecimalColumnStatsData dc = cd.getDecimalStats();
+    if (dc.isSetHighValue()) {
+      byte[] highValArr = setByteArrayToLongSize(dc.getHighValue().getUnscaled());
+      ls.add(highValue + ByteBuffer.wrap(highValArr).getLong() + "E" + dc.getHighValue().getScale() + "'");
+    }
+    if (dc.isSetLowValue()) {
+      byte[] lowValArr = setByteArrayToLongSize(dc.getLowValue().getUnscaled());
+      ls.add(lowValue + ByteBuffer.wrap(lowValArr).getLong() + "E" + dc.getLowValue().getScale() + "'");
+    }
+    ls.add(numNulls + dc.getNumNulls() + "'");
+    ls.add(numDVs + dc.getNumDVs() + "'");
+  }
+
+  public void addDoubleStats(ColumnStatisticsData cd, List<String> ls) {
+    if (!cd.isSetDoubleStats()) {
+      return;
+    }
+    DoubleColumnStatsData dc = cd.getDoubleStats();
+    ls.add(numNulls + dc.getNumNulls() + "'");
+    ls.add(numDVs + dc.getNumDVs() + "'");
+    ls.add(highValue + dc.getHighValue() + "'");
+    ls.add(lowValue + dc.getLowValue() + "'");
+  }
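+
+  // Illustrative rendering (values invented): the add*Stats helpers above accumulate
+  // quoted key/value pairs that the callers below join with "," into the SET clause
+  // of the UPDATE STATISTICS templates, producing fragments such as
+  //   SET('lowValue'='1','highValue'='1000','numNulls'='0','numDVs'='997' )
+  // for a long column.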
+
+  public String checkBitVectors(ColumnStatisticsData cd) {
+    if (cd.isSetDoubleStats() && cd.getDoubleStats().isSetBitVectors()) {
+      return Base64.getEncoder().encodeToString(cd.getDoubleStats().getBitVectors());
+    }
+    if (cd.isSetBinaryStats() && cd.getBinaryStats().isSetBitVectors()) {
+      return Base64.getEncoder().encodeToString(cd.getBinaryStats().getBitVectors());
+    }
+    if (cd.isSetStringStats() && cd.getStringStats().isSetBitVectors()) {
+      return Base64.getEncoder().encodeToString(cd.getStringStats().getBitVectors());
+    }
+    if (cd.isSetDateStats() && cd.getDateStats().isSetBitVectors()) {
+      return Base64.getEncoder().encodeToString(cd.getDateStats().getBitVectors());
+    }
+    if (cd.isSetLongStats() && cd.getLongStats().isSetBitVectors()) {
+      return Base64.getEncoder().encodeToString(cd.getLongStats().getBitVectors());
+    }
+    if (cd.isSetDecimalStats() && cd.getDecimalStats().isSetBitVectors()) {
+      return Base64.getEncoder().encodeToString(cd.getDecimalStats().getBitVectors());
+    }
+    if (cd.isSetBooleanStats() && cd.getBooleanStats().isSetBitVectors()) {
+      return Base64.getEncoder().encodeToString(cd.getBooleanStats().getBitVectors());
+    }
+    return null;
+  }
+
+  /**
+   * Parses the basic ColumnStatObject and returns the alter table stmt for each individual column in a table.
+   *
+   * @param columnStatisticsData
+   * @param colName
+   * @param tblName
+   * @param dbName
+   * @return
+   */
+  public String getAlterTableStmtCol(ColumnStatisticsData columnStatisticsData, String colName, String tblName, String dbName) {
+    ST command = new ST(ALTER_TABLE_UPDATE_STATISTICS_TABLE_COLUMN);
+    command.add(DATABASE_NAME, dbName);
+    command.add(TABLE_NAME, tblName);
+    command.add(COLUMN_NAME, colName);
+    List<String> temp = new ArrayList<>();
+    addBinaryStats(columnStatisticsData, temp);
+    addLongStats(columnStatisticsData, temp);
+    addBooleanStats(columnStatisticsData, temp);
+    addStringStats(columnStatisticsData, temp);
+    addDateStats(columnStatisticsData, temp);
+    addDoubleStats(columnStatisticsData, temp);
+    addDecimalStats(columnStatisticsData, temp);
+    command.add(TBLPROPERTIES, Joiner.on(",").join(temp));
+    return command.render();
+  }
+
+  /**
+   * Parses the ColumnStatistics for all the columns in a given table and adds the alter table update
+   * statistics command for each column.
+   *
+   * @param tbl
+   */
+  public List<String> getAlterTableStmtTableStatsColsAll(Table tbl)
+      throws HiveException {
+    List<String> alterTblStmt = new ArrayList<String>();
+    List<String> accessedColumns = getTableColumnNames(tbl);
+    List<ColumnStatisticsObj> tableColumnStatistics = Hive.get().getTableColumnStatistics(tbl.getDbName(),
+        tbl.getTableName(),
+        accessedColumns,
+        true);
+    ColumnStatisticsObj[] columnStatisticsObj = tableColumnStatistics.toArray(new ColumnStatisticsObj[0]);
+    for (int i = 0; i < columnStatisticsObj.length; i++) {
+      alterTblStmt.add(getAlterTableStmtCol(columnStatisticsObj[i].getStatsData(),
+          columnStatisticsObj[i].getColName(),
+          tbl.getTableName(), tbl.getDbName()));
+      String base64 = checkBitVectors(columnStatisticsObj[i].getStatsData());
+      if (base64 != null) {
+        ST command = new ST(EXIST_BIT_VECTORS);
+        command.add(DATABASE_NAME, tbl.getDbName());
+        command.add(TABLE_NAME, tbl.getTableName());
+        command.add(COLUMN_NAME, columnStatisticsObj[i].getColName());
+        command.add(BASE_64_VALUE, base64);
+        alterTblStmt.add(command.render());
+      }
+    }
+    return alterTblStmt;
+  }
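+
+  // Note on bit vectors: when checkBitVectors returns a non-null Base64 string, the
+  // caller above emits it through the EXIST_BIT_VECTORS template, i.e. as a line
+  // starting with "--", so the generated script records the value as a SQL comment
+  // instead of trying to restore the (not yet supported) bit vectors.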
+
+  /**
+   * Parses the basic ColumnStatObject and returns the alter table stmt for each individual column in a partition.
+   *
+   * @param columnStatisticsData
+   * @param colName
+   * @param tblName
+   * @param ptName
+   * @param dbName
+   * @return
+   */
+  public String getAlterTableStmtPartitionColStat(ColumnStatisticsData columnStatisticsData, String colName,
+      String tblName, String ptName, String dbName) {
+    ST command = new ST(ALTER_TABLE_UPDATE_STATISTICS_PARTITION_COLUMN);
+    command.add(DATABASE_NAME, dbName);
+    command.add(TABLE_NAME, tblName);
+    command.add(COLUMN_NAME, colName);
+    command.add(PARTITION_NAME, ptName);
+    List<String> temp = new ArrayList<>();
+    addBinaryStats(columnStatisticsData, temp);
+    addLongStats(columnStatisticsData, temp);
+    addBooleanStats(columnStatisticsData, temp);
+    addStringStats(columnStatisticsData, temp);
+    addDateStats(columnStatisticsData, temp);
+    addDoubleStats(columnStatisticsData, temp);
+    addDecimalStats(columnStatisticsData, temp);
+    command.add(TBLPROPERTIES, Joiner.on(",").join(temp));
+    if (checkIfDefaultPartition(ptName)) {
+      command.add(COMMENT_SQL, "--");
+    }
+    return command.render();
+  }
+
+  /**
+   * Parses the ColumnStatistics for all the columns in a given partition and adds the alter table update
+   * statistics command for each column.
+   *
+   * @param columnStatisticsObjList
+   * @param colName
+   * @param tblName
+   * @param ptName
+   * @param dbName
+   */
+  public List<String> getAlterTableStmtPartitionStatsColsAll(List<ColumnStatisticsObj> columnStatisticsObjList,
+      List<String> colName,

Review comment:
       This is never used. Is it needed?


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Issue Time Tracking
-------------------

    Worklog Id:     (was: 590188)
    Time Spent: 2h 10m  (was: 2h)

> Explain ddl for debugging
> -------------------------
>
>                 Key: HIVE-24596
>                 URL: https://issues.apache.org/jira/browse/HIVE-24596
>             Project: Hive
>          Issue Type: Improvement
>            Reporter: Rajesh Balamohan
>            Assignee: Harshit Gupta
>            Priority: Major
>              Labels: pull-request-available
>         Attachments: output, query, table_definitions
>
>          Time Spent: 2h 10m
>  Remaining Estimate: 0h
>
> For debugging query issues, basic details like table schema, statistics,
> partition details, and query plans are needed.
> It would be good to have "explain ddl" support, which can generate these
> details. This can help in recreating the schema and planner issues without
> sample data.
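
A minimal sketch of the kind of script the proposed "explain ddl" could emit for one
partitioned table, assuming the templates in the patch above; the database, table,
column, and statistic values below are invented for illustration only:

    CREATE DATABASE IF NOT EXISTS sales_db;
    ALTER TABLE sales_db.orders ADD IF NOT EXISTS PARTITION (ds='2021-04-28');
    ALTER TABLE sales_db.orders PARTITION (ds='2021-04-28') UPDATE STATISTICS SET('numRows'='1000','rawDataSize'='100000' );
    ALTER TABLE sales_db.orders PARTITION (ds='2021-04-28') UPDATE STATISTICS FOR COLUMN id SET('lowValue'='1','highValue'='1000','numNulls'='0','numDVs'='997' );
    ALTER TABLE sales_db.orders ADD CONSTRAINT pk_orders PRIMARY KEY (id) DISABLE NOVALIDATE;

Replaying such a script against an empty warehouse recreates the schema and the
optimizer-relevant statistics, which is what allows planner issues to be reproduced
without shipping sample data.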