[ 
https://issues.apache.org/jira/browse/HIVE-13353?focusedWorklogId=815085&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-815085
 ]

ASF GitHub Bot logged work on HIVE-13353:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 10/Oct/22 03:51
            Start Date: 10/Oct/22 03:51
    Worklog Time Spent: 10m 
      Work Description: rkirtir commented on code in PR #3608:
URL: https://github.com/apache/hive/pull/3608#discussion_r990898269


##########
ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java:
##########
@@ -2117,4 +2121,168 @@ public void testIsRawFormatFile() throws Exception {
     List<String> res = runStatementOnDriver("select * from file_formats");
     Assert.assertEquals(3, res.size());
   }
+  @Test
+  public void testShowCompactions() throws Exception {
+    d.destroy();
+    hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
+    d = new Driver(hiveConf);
+    //generate some compaction history
+    runStatementOnDriver("drop database if exists mydb1 cascade");
+    runStatementOnDriver("create database mydb1");
+    runStatementOnDriver("create table mydb1.tbl0 " + "(a int, b int) 
partitioned by (p string) clustered by (a) into " +
+      BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES 
('transactional'='true')");
+    runStatementOnDriver("insert into mydb1.tbl0" + " PARTITION(p) " +
+      " 
values(1,2,'p1'),(3,4,'p1'),(1,2,'p2'),(3,4,'p2'),(1,2,'p3'),(3,4,'p3')");
+    runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p1') 
compact 'MAJOR'");
+    TestTxnCommands2.runWorker(hiveConf);
+    runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p2') 
compact 'MAJOR'");
+    TestTxnCommands2.runWorker(hiveConf);
+    runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p3') 
compact 'MAJOR'");
+    TestTxnCommands2.runWorker(hiveConf);
+    runStatementOnDriver("insert into mydb1.tbl0" + " PARTITION(p) " +
+      " 
values(4,5,'p1'),(6,7,'p1'),(4,5,'p2'),(6,7,'p2'),(4,5,'p3'),(6,7,'p3')");
+    runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION (p='p1') 
compact 'MAJOR'");
+    TestTxnCommands2.runWorker(hiveConf);
+    runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION (p='p2') 
compact 'MAJOR'");
+    TestTxnCommands2.runWorker(hiveConf);
+    runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION (p='p3')  
compact 'MAJOR' pool 'pool0'");
+    TestTxnCommands2.runWorker(hiveConf);
+    TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
+
+    SessionState.get().setCurrentDatabase("mydb1");
+
+    //testing show compaction command
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<String> r = runStatementOnDriver("SHOW COMPACTIONS");
+    Assert.assertEquals(rsp.getCompacts().size()+1, r.size());//includes 
Header row
+
+
+    r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb1 STATUS 'ready for 
cleaning'");
+    
Assert.assertEquals(rsp.getCompacts().stream().filter(x->x.getState().equals("ready
 for cleaning")).count() +1,

Review Comment:
   rsp is the result of txnHandler.showCompact(new ShowCompactRequest()), and it is checked against r, which is the result of the driver command.
   
   
       r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb1 POOL 'poolx' TYPE 'MINOR' ");
       //includes Header row
       Assert.assertEquals(**rsp**.getCompacts().stream().filter(x->x.getDbname().equals("mydb1")).
         filter(x->x.getPoolName().equals("poolx")).filter(x->x.getType().equals(CompactionType.MAJOR)).count()+1, **r**.size());
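   In other words, each assertion cross-checks the metastore response against the driver output. A minimal sketch of that pattern, built only from identifiers already used in this test (the SCHEMA/TYPE filter shown is just one of the variants asserted above):
   
       // metastore view of the compaction queue
       ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
       // driver (SQL) view of the same data; one string per output row
       List<String> r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb1 TYPE 'MAJOR'");
       // expected row count: matching compactions in the metastore response, plus the header row
       long expected = rsp.getCompacts().stream()
           .filter(x -> x.getDbname().equals("mydb1"))
           .filter(x -> x.getType().equals(CompactionType.MAJOR))
           .count() + 1;
       Assert.assertEquals(expected, r.size());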



##########
ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java:
##########
@@ -2117,4 +2121,168 @@ public void testIsRawFormatFile() throws Exception {
     List<String> res = runStatementOnDriver("select * from file_formats");
     Assert.assertEquals(3, res.size());
   }
+  @Test
+  public void testShowCompactions() throws Exception {
+    d.destroy();
+    hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
+    d = new Driver(hiveConf);
+    //generate some compaction history
+    runStatementOnDriver("drop database if exists mydb1 cascade");
+    runStatementOnDriver("create database mydb1");
+    runStatementOnDriver("create table mydb1.tbl0 " + "(a int, b int) 
partitioned by (p string) clustered by (a) into " +
+      BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES 
('transactional'='true')");
+    runStatementOnDriver("insert into mydb1.tbl0" + " PARTITION(p) " +
+      " 
values(1,2,'p1'),(3,4,'p1'),(1,2,'p2'),(3,4,'p2'),(1,2,'p3'),(3,4,'p3')");
+    runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p1') 
compact 'MAJOR'");
+    TestTxnCommands2.runWorker(hiveConf);
+    runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p2') 
compact 'MAJOR'");
+    TestTxnCommands2.runWorker(hiveConf);
+    runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p3') 
compact 'MAJOR'");
+    TestTxnCommands2.runWorker(hiveConf);
+    runStatementOnDriver("insert into mydb1.tbl0" + " PARTITION(p) " +
+      " 
values(4,5,'p1'),(6,7,'p1'),(4,5,'p2'),(6,7,'p2'),(4,5,'p3'),(6,7,'p3')");
+    runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION (p='p1') 
compact 'MAJOR'");
+    TestTxnCommands2.runWorker(hiveConf);
+    runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION (p='p2') 
compact 'MAJOR'");
+    TestTxnCommands2.runWorker(hiveConf);
+    runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION (p='p3')  
compact 'MAJOR' pool 'pool0'");
+    TestTxnCommands2.runWorker(hiveConf);
+    TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
+
+    SessionState.get().setCurrentDatabase("mydb1");
+
+    //testing show compaction command
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<String> r = runStatementOnDriver("SHOW COMPACTIONS");
+    Assert.assertEquals(rsp.getCompacts().size()+1, r.size());//includes 
Header row
+
+
+    r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb1 STATUS 'ready for 
cleaning'");
+    
Assert.assertEquals(rsp.getCompacts().stream().filter(x->x.getState().equals("ready
 for cleaning")).count() +1,
+      r.size());//includes Header row
+
+    r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb1 TYPE 'MAJOR' ");
+    
Assert.assertEquals(rsp.getCompacts().stream().filter(x->x.getDbname().equals("mydb1")).
+      filter(x->x.getType().equals(CompactionType.MAJOR)).count()+1, 
r.size());//includes Header row
+
+    r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb1 POOL 'poolx' TYPE 
'MINOR' ");
+    //includes Header row
+    
Assert.assertEquals(rsp.getCompacts().stream().filter(x->x.getDbname().equals("mydb1")).
+      
filter(x->x.getPoolName().equals("poolx")).filter(x->x.getType().equals(CompactionType.MAJOR)).count()+1,
 r.size());
+
+    r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb1 POOL 'pool0' TYPE 
'MAJOR'");
+    Assert.assertEquals(2, r.size());//includes Header row
+
+    r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb1 POOL 'pool0'");
+    
Assert.assertEquals(rsp.getCompacts().stream().filter(x->x.getDbname().equals("mydb1")).
+      filter(x->x.getPoolName().equals("pool0")).count()+1, 
r.size());//includes Header row
+
+    r = runStatementOnDriver("SHOW COMPACTIONS DATABASE mydb1 POOL 'pool0'");
+    Assert.assertEquals(2, r.size());//includes Header row
+
+    r = runStatementOnDriver("SHOW COMPACTIONS tbl0 TYPE 'MAJOR' ");

Review Comment:
   We are following the syntax below:
   SHOW COMPACTIONS <[DATABASE|SCHEMA <db>]|[[<db>.]<table> [PARTITION (<partition_spec>)]]> [POOL <pool_name>] [TYPE <type>] [STATE <state>] [ORDER BY <ob clause>] [LIMIT <number>]
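   For example, the statements exercised in the test above are all instances of this grammar (listed here purely as illustration; mydb1, tbl0 and 'pool0' are just the names this particular test uses):
   
       SHOW COMPACTIONS
       SHOW COMPACTIONS SCHEMA mydb1 STATUS 'ready for cleaning'
       SHOW COMPACTIONS DATABASE mydb1 POOL 'pool0'
       SHOW COMPACTIONS tbl0 TYPE 'MAJOR'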
   



##########
parser/src/test/org/apache/hadoop/hive/ql/parse/TestParseShowCompactions.java:
##########
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse;
+
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.core.Is.is;
+
+public class TestParseShowCompactions {
+    ParseDriver parseDriver = new ParseDriver();
+
+    @Test
+    public void testShowCompactions() throws Exception {
+        ASTNode tree = parseDriver.parse(
+                "SHOW COMPACTIONS", null).getTree();
+
+        assertThat(tree.toStringTree(), is("tok_show_compactions <eof>"));
+    }
+
+    @Test
+    public void testShowCompactionsFilterDb() throws Exception {
+        ASTNode tree = parseDriver.parse(
+                "SHOW COMPACTIONS DATABASE db1", null).getTree();
+
+        assertThat(tree.toStringTree(), is("(tok_show_compactions db1) 
<eof>"));
+    }
+
+    private static final String EXPECTED_WHEN_FILTER_BY_DB_AND_ALL_AND_ORDER_BY = "\n" +
+            "nil\n" +
+            "   TOK_SHOW_COMPACTIONS\n" +
+            "      db1\n" +
+            "      TOK_COMPACT_POOL\n" +
+            "         'pool0'\n" +
+            "      TOK_COMPACTION_TYPE\n" +
+            "         'minor'\n" +
+            "      TOK_COMPACTION_STATUS\n" +
+            "         'ready for clean'\n" +
+            "      TOK_ORDERBY\n" +
+            "         TOK_TABSORTCOLNAMEDESC\n" +
+            "            TOK_NULLS_FIRST\n" +
+            "               TOK_TABLE_OR_COL\n" +
+            "                  cq_table\n" +
+            "         TOK_TABSORTCOLNAMEASC\n" +
+            "            TOK_NULLS_LAST\n" +
+            "               TOK_TABLE_OR_COL\n" +
+            "                  cq_state\n" +
+            "      TOK_LIMIT\n" +
+            "         42\n" +
+            "   <EOF>\n";
+
+    @Test
+    public void testShowCompactionsFilterDbAndAllAndOrder() throws Exception {
+        ASTNode tree = parseDriver.parse(
+                "SHOW COMPACTIONS DATABASE db1 POOL 'pool0' TYPE 'minor' 
STATUS 'ready for clean' ORDER BY cq_table DESC, cq_state LIMIT 42", 
null).getTree();

Review Comment:
   The ORDER BY and LIMIT functionality is part of another JIRA, [HIVE-26580](https://issues.apache.org/jira/browse/HIVE-26580): "SHOW COMPACTIONS should support ordering and limiting functionality in filtering options".





Issue Time Tracking
-------------------

    Worklog Id:     (was: 815085)
    Time Spent: 1.5h  (was: 1h 20m)

> SHOW COMPACTIONS should support filtering options
> -------------------------------------------------
>
>                 Key: HIVE-13353
>                 URL: https://issues.apache.org/jira/browse/HIVE-13353
>             Project: Hive
>          Issue Type: Improvement
>          Components: Transactions
>    Affects Versions: 1.3.0, 2.0.0
>            Reporter: Eugene Koifman
>            Assignee: KIRTI RUGE
>            Priority: Major
>              Labels: pull-request-available
>         Attachments: HIVE-13353.01.patch
>
>          Time Spent: 1.5h
>  Remaining Estimate: 0h
>
> Since we now have historical information in SHOW COMPACTIONS, the output can
> easily become unwieldy (e.g. 1000 partitions with 3 lines of history each).
> This is a significant usability issue.
> We need to add the ability to filter by db/table/partition.
> It would perhaps also be useful to filter by status.



--
This message was sent by Atlassian Jira
(v8.20.10#820010)
