[ 
https://issues.apache.org/jira/browse/HIVE-25225?focusedWorklogId=609640&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-609640
 ]

ASF GitHub Bot logged work on HIVE-25225:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 10/Jun/21 11:00
            Start Date: 10/Jun/21 11:00
    Worklog Time Spent: 10m 
      Work Description: pvary commented on a change in pull request #2376:
URL: https://github.com/apache/hive/pull/2376#discussion_r649077478



##########
File path: 
standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestPartitionStat.java
##########
@@ -0,0 +1,304 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client;
+
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import 
org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+import org.apache.hadoop.hive.metastore.utils.FileUtils;
+
+import org.apache.thrift.TException;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import com.google.common.collect.Lists;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Tests for updating partition column stats.
+ */
+@RunWith(Parameterized.class)
+@Category(MetastoreCheckinTest.class)
+public class TestPartitionStat extends MetaStoreClientTest {
+  private AbstractMetaStoreService metaStore;
+  private IMetaStoreClient client;
+  private String directSql = "false";
+
+  protected static final String DB_NAME = "test_part_stat";
+  protected static final String TABLE_NAME = "test_part_stat_table";
+  private static final String DEFAULT_COL_TYPE = "int";
+  private static final String PART_COL_NAME = "year";
+  protected static final short MAX = -1;
+  private static final Partition[] PARTITIONS = new Partition[5];
+  public static final String HIVE_ENGINE = "hive";
+
+  @BeforeClass
+  public static void startMetaStores() {
+    Map<MetastoreConf.ConfVars, String> msConf = new 
HashMap<MetastoreConf.ConfVars, String>();
+    // Enable trash, so it can be tested
+    Map<String, String> extraConf = new HashMap<>();
+    extraConf.put("fs.trash.checkpoint.interval", "30");  // 
FS_TRASH_CHECKPOINT_INTERVAL_KEY
+    extraConf.put("fs.trash.interval", "30");             // 
FS_TRASH_INTERVAL_KEY (hadoop-2)
+    startMetaStores(msConf, extraConf);
+  }
+
+  public TestPartitionStat(String name, AbstractMetaStoreService metaStore) {
+    this.metaStore = metaStore;
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    // Get new client, store the original value of directSql to restore it 
back after test.
+    directSql = 
metaStore.getConf().get(MetastoreConf.ConfVars.TRY_DIRECT_SQL.getVarname());
+    client = metaStore.getClient();
+
+    // Clean up the database
+    client.dropDatabase(DB_NAME, true, true, true);
+    metaStore.cleanWarehouseDirs();
+    Database db = new DatabaseBuilder().
+        setName(DB_NAME).
+        create(client, metaStore.getConf());
+
+    // Create test tables with 3 partitions
+    createTable(TABLE_NAME, getYearPartCol(), null);
+    createPartitions();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    
metaStore.getConf().set(MetastoreConf.ConfVars.TRY_DIRECT_SQL.getVarname(), 
directSql);
+    try {
+      if (client != null) {
+        try {
+          client.close();
+        } catch (Exception e) {
+          // HIVE-19729: Swallow the exceptions based on the discussion in the 
Jira
+        }
+      }
+    } finally {
+      client = null;
+    }
+  }
+
+  public AbstractMetaStoreService getMetaStore() {
+    return metaStore;
+  }
+
+  public IMetaStoreClient getClient() {
+    return client;
+  }
+
+  public void setClient(IMetaStoreClient client) {
+    this.client = client;
+  }
+
+  private void createPartitions() throws Exception {
+    PARTITIONS[0] = createPartition(Lists.newArrayList("2017"), 
getYearPartCol());
+    PARTITIONS[1] = createPartition(Lists.newArrayList("2018"), 
getYearPartCol());
+    PARTITIONS[2] = createPartition(Lists.newArrayList("2019"), 
getYearPartCol());
+    PARTITIONS[3] = createPartition(Lists.newArrayList("2020"), 
getYearPartCol());
+    PARTITIONS[4] = createPartition(Lists.newArrayList("2021"), 
getYearPartCol());
+  }
+
+  private Table createTable(String tableName, List<FieldSchema> partCols,
+                            Map<String, String> tableParams) throws Exception {
+    String type = "MANAGED_TABLE";
+    String location = metaStore.getWarehouseRoot() + "/" + tableName;
+
+    if (tableParams != null) {
+      type = (tableParams.getOrDefault("EXTERNAL", 
"FALSE").equalsIgnoreCase("TRUE")) ?
+              "EXTERNAL_TABLE" : "MANAGED_TABLE";
+      location = (type.equalsIgnoreCase("EXTERNAL_TABLE")) ?
+              (metaStore.getExternalWarehouseRoot() + "/" + tableName) : 
(metaStore.getWarehouseRoot() + "/" + tableName);
+    }
+
+    Table table = new TableBuilder()
+            .setDbName(DB_NAME)
+            .setTableName(tableName)
+            .setType(type)
+            .addCol("test_id", "int", "test col id")
+            .addCol("test_value", "string", "test col value")
+            .setPartCols(partCols)
+            .setLocation(location)
+            .setTableParams(tableParams)
+            .create(client, metaStore.getConf());
+    return table;
+  }
+
+  private Partition createPartition(List<String> values,
+                                    List<FieldSchema> partCols) throws 
Exception {
+    new PartitionBuilder()
+            .setDbName(DB_NAME)
+            .setTableName(TABLE_NAME)
+            .setValues(values)
+            .setCols(partCols)
+            .addToTable(client, metaStore.getConf());
+    Partition partition = client.getPartition(DB_NAME, TABLE_NAME, values);
+    return partition;
+  }
+
+  private static List<FieldSchema> getYearPartCol() {
+    List<FieldSchema> cols = new ArrayList<>();
+    cols.add(new FieldSchema(PART_COL_NAME, DEFAULT_COL_TYPE, "year part 
col"));
+    return cols;
+  }
+
+  private ColumnStatisticsData createStatsData(long numNulls, long numDVs, 
long low, long high) {
+    ColumnStatisticsData data = new ColumnStatisticsData();
+    LongColumnStatsDataInspector stats = new LongColumnStatsDataInspector();
+    stats.setLowValue(low);
+    stats.setHighValue(high);
+    stats.setNumNulls(numNulls);
+    stats.setNumDVs(numDVs);
+    data.setLongStats(stats);
+    return data;
+  }
+
+  private ColumnStatistics createPartColStats(List<String> partValue, 
ColumnStatisticsData partitionStats) {
+    String pName = 
FileUtils.makePartName(Collections.singletonList(PART_COL_NAME), partValue);
+    ColumnStatistics colStats = new ColumnStatistics();
+    ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, DB_NAME, 
TABLE_NAME);
+    statsDesc.setPartName(pName);
+    colStats.setStatsDesc(statsDesc);
+    colStats.setEngine(HIVE_ENGINE);
+    statsDesc.setIsTblLevel(false);
+    ColumnStatisticsObj statObj = new ColumnStatisticsObj(PART_COL_NAME, 
"int", partitionStats);
+    colStats.addToStatsObj(statObj);
+    return colStats;
+  }
+
+  private void assertLongStatsEquals(LongColumnStatsData expectedData, 
LongColumnStatsData actualData) {
+    Assert.assertEquals(expectedData.getNumDVs(), actualData.getNumDVs());
+    Assert.assertEquals(expectedData.getNumNulls(), actualData.getNumNulls());
+    Assert.assertEquals(expectedData.getHighValue(), 
actualData.getHighValue());
+    Assert.assertEquals(expectedData.getLowValue(), actualData.getLowValue());
+    Assert.assertArrayEquals(expectedData.getBitVectors(), 
actualData.getBitVectors());
+  }
+
+  private List<String> updatePartColStat(Map<List<String>, 
ColumnStatisticsData> partitionStats) throws Exception {
+    SetPartitionsStatsRequest rqst = new SetPartitionsStatsRequest();
+    List<String> pNameList = new ArrayList<>();
+    for (Map.Entry entry : partitionStats.entrySet()) {
+      ColumnStatistics colStats = createPartColStats((List<String>) 
entry.getKey(),
+              (ColumnStatisticsData) entry.getValue());
+      String pName = 
FileUtils.makePartName(Collections.singletonList(PART_COL_NAME), (List<String>) 
entry.getKey());
+      rqst.addToColStats(colStats);
+      rqst.setEngine(HIVE_ENGINE);
+      pNameList.add(pName);
+    }
+    client.setPartitionColumnStatistics(rqst);
+    return pNameList;
+  }
+
+  private void validateStats(Map<List<String>, ColumnStatisticsData> 
partitionStats,
+                             List<String> pNameList) throws Exception {
+    Map<String, List<ColumnStatisticsObj>> statistics = 
client.getPartitionColumnStatistics(DB_NAME, TABLE_NAME,
+            pNameList, Collections.singletonList(PART_COL_NAME), HIVE_ENGINE);
+    for (Map.Entry entry : partitionStats.entrySet()) {
+      String pName = 
FileUtils.makePartName(Collections.singletonList(PART_COL_NAME), (List<String>) 
entry.getKey());
+      ColumnStatisticsObj statisticsObjs = statistics.get(pName).get(0);
+      ColumnStatisticsData data = (ColumnStatisticsData) entry.getValue();
+      assertLongStatsEquals(statisticsObjs.getStatsData().getLongStats(), 
data.getLongStats());
+    }
+  }
+
+  @Test
+  public void testUpdateStatSingle() throws Exception {
+    // For remote metastore setup is not there for column stats.
+    if (client.isLocalMetaStore()) {

Review comment:
       For this we can use `org.junit.Assume`




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Issue Time Tracking
-------------------

    Worklog Id:     (was: 609640)
    Time Spent: 1h 20m  (was: 1h 10m)

> Update column stat throws NPE if direct sql is disabled
> -------------------------------------------------------
>
>                 Key: HIVE-25225
>                 URL: https://issues.apache.org/jira/browse/HIVE-25225
>             Project: Hive
>          Issue Type: Sub-task
>            Reporter: mahesh kumar behera
>            Assignee: mahesh kumar behera
>            Priority: Major
>              Labels: pull-request-available
>          Time Spent: 1h 20m
>  Remaining Estimate: 0h
>
> In case direct sql is disabled, the MetaStoreDirectSql object is not 
> initialised and that's causing an NPE. 



--
This message was sent by Atlassian Jira
(v8.3.4#803005)

Reply via email to