[ https://issues.apache.org/jira/browse/HIVE-23725?focusedWorklogId=479954&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-479954 ]

ASF GitHub Bot logged work on HIVE-23725:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 08/Sep/20 09:08
            Start Date: 08/Sep/20 09:08
    Worklog Time Spent: 10m 
      Work Description: deniskuzZ commented on a change in pull request #1474:
URL: https://github.com/apache/hive/pull/1474#discussion_r484767687



##########
File path: ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
##########
@@ -2315,6 +2386,139 @@ private void testConcurrentMergeInsertNoDuplicates(String query, boolean sharedW
     List res = new ArrayList();
     driver.getFetchTask().fetch(res);
     Assert.assertEquals("Duplicate records found", 4, res.size());
+    dropTable(new String[]{"target", "source"});
+  }
+
+  /**
+   * ValidTxnManager.isValidTxnListState can invalidate a snapshot if a relevant write transaction was committed
+   * between query compilation and lock acquisition. When this happens we have to recompile the given query,
+   * otherwise we can miss reading partitions created in between. The following three cases test these scenarios.
+   * @throws Exception ex
+   */
+  @Test
+  public void testMergeInsertDynamicPartitioningSequential() throws Exception {
+    dropTable(new String[]{"target", "source"});
+    conf.setBoolVar(HiveConf.ConfVars.TXN_WRITE_X_LOCK, false);
+
+    // Create partition c=1
+    driver.run("create table target (a int, b int) partitioned by (c int) stored as orc TBLPROPERTIES ('transactional'='true')");
+    driver.run("insert into target values (1,1,1), (2,2,1)");
+    // Create partition c=2
+    driver.run("create table source (a int, b int) partitioned by (c int) stored as orc TBLPROPERTIES ('transactional'='true')");
+    driver.run("insert into source values (3,3,2), (4,4,2)");
+
+    // txn 1 inserts data into an old and a new partition
+    driver.run("insert into source values (5,5,2), (6,6,3)");
+
+    // txn 2 inserts into a new partition of the target table (a duplicate, considering the source table)
+    driver.run("insert into target values (3, 3, 2)");
+
+    // txn 3: merge
+    driver.run("merge into target t using source s on t.a = s.a " +
+      "when not matched then insert values (s.a, s.b, s.c)");
+    driver.run("select * from target");
+    List res = new ArrayList();
+    driver.getFetchTask().fetch(res);
+    // The merge should see all three partitions and not create duplicates
+    Assert.assertEquals("Duplicate records found", 6, res.size());
+    Assert.assertTrue("Partition 3 was skipped", res.contains("6\t6\t3"));
+    dropTable(new String[]{"target", "source"});
+  }
+
+  @Test
+  public void testMergeInsertDynamicPartitioningSnapshotInvalidatedWithOldCommit() throws Exception {
+    // By creating the driver with the factory, we should have a ReExecDriver
+    IDriver driver3 = DriverFactory.newDriver(conf);
+    Assert.assertTrue("ReExecDriver was expected", driver3 instanceof ReExecDriver);

Review comment:
       changed
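The window the javadoc above describes exists because compilation pins a snapshot before locks are taken. A plain JDBC client cannot pause between those two steps, so the embedded-Driver tests are what exercise the window deterministically; still, a minimal JDBC sketch of the two conflicting statements may help map the scenario to client code. The connection URL, class name, and data below are assumed for illustration, not taken from this PR:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class SnapshotRaceSketch {
      public static void main(String[] args) throws Exception {
        // Assumed HiveServer2 endpoint; adjust for your cluster.
        String url = "jdbc:hive2://localhost:10000/default";
        try (Connection writer = DriverManager.getConnection(url);
             Connection merger = DriverManager.getConnection(url);
             Statement w = writer.createStatement();
             Statement m = merger.createStatement()) {
          // Concurrent writer: commits rows into an existing partition (c=2)
          // and a brand new one (c=3) of the merge's source table.
          w.execute("insert into source values (5,5,2), (6,6,3)");
          // Merge session: if its snapshot was taken before the insert above
          // committed but its locks were granted after, it must recompile,
          // or it reads the new c=2 rows while missing partition c=3.
          m.execute("merge into target t using source s on t.a = s.a "
              + "when not matched then insert values (s.a, s.b, s.c)");
        }
      }
    }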

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/Driver.java
##########
@@ -488,30 +489,40 @@ private void runInternal(String command, boolean alreadyCompiled) throws Command
 
       lockAndRespond();
 
+      int retryShapshotCnt = 0;
+      int maxRetrySnapshotCnt = HiveConf.getIntVar(driverContext.getConf(),
+        HiveConf.ConfVars.HIVE_TXN_MAX_RETRYSNAPSHOT_COUNT);
+
       try {
-        if (!driverTxnHandler.isValidTxnListState()) {
-          LOG.info("Compiling after acquiring locks");
+        while (!driverTxnHandler.isValidTxnListState() && ++retryShapshotCnt <= maxRetrySnapshotCnt) {
+          LOG.info("Compiling after acquiring locks, attempt #" + retryShapshotCnt);
           // Snapshot was outdated when locks were acquired, hence regenerate context,
           // txn list and retry
           // TODO: Lock acquisition should be moved before analyze, this is a bit hackish.
           // Currently, we acquire a snapshot, we compile the query wrt that snapshot,
           // and then, we acquire locks. If snapshot is still valid, we continue as usual.
           // But if snapshot is not valid, we recompile the query.
           if (driverContext.isOutdatedTxn()) {
+            LOG.info("Snapshot is outdated, re-initiating transaction ...");
             driverContext.getTxnManager().rollbackTxn();
 
             String userFromUGI = DriverUtils.getUserFromUGI(driverContext);
             driverContext.getTxnManager().openTxn(context, userFromUGI, driverContext.getTxnType());
             lockAndRespond();
           }
+
           driverContext.setRetrial(true);
           driverContext.getBackupContext().addSubContext(context);
           driverContext.getBackupContext().setHiveLocks(context.getHiveLocks());
           context = driverContext.getBackupContext();
+
           driverContext.getConf().set(ValidTxnList.VALID_TXNS_KEY,
             driverContext.getTxnManager().getValidTxns().toString());
+
           if (driverContext.getPlan().hasAcidResourcesInQuery()) {
+            compileInternal(context.getCmd(), true);
             driverTxnHandler.recordValidWriteIds();
+            driverTxnHandler.setWriteIdForAcidFileSinks();
           }
 
           if (!alreadyCompiled) {
Review comment:
       removed
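The control flow this hunk adds can be summarized outside of Hive's own classes: compile, acquire locks, and if the transactional snapshot became outdated in the meantime, recompile against a fresh snapshot, up to a bounded number of attempts. The following is a minimal standalone sketch of that pattern; the QuerySession interface and its methods are stand-ins invented for illustration, not Hive's actual API:

    // Stand-in abstraction; not Hive's actual classes.
    interface QuerySession {
      void compile(String query);          // plan the query against the current snapshot
      void acquireLocks();                 // may block behind concurrent writers
      boolean isSnapshotStillValid();      // false if a relevant txn committed in between
      void refreshSnapshot();              // roll back, reopen the txn, re-acquire locks
    }

    final class BoundedRecompile {
      static void run(QuerySession session, String query, int maxRetries) {
        session.compile(query);
        session.acquireLocks();
        int attempt = 0;
        // Mirrors the while loop the diff adds to Driver.runInternal: keep
        // recompiling until the snapshot seen at compile time is still valid
        // under the locks we hold, giving up after maxRetries attempts.
        while (!session.isSnapshotStillValid()) {
          if (++attempt > maxRetries) {
            throw new IllegalStateException(
                "Snapshot still outdated after " + maxRetries + " recompilations");
          }
          session.refreshSnapshot();  // rollbackTxn() + openTxn() + lockAndRespond() in the diff
          session.compile(query);     // compileInternal(context.getCmd(), true) in the diff
        }
        // execution proceeds here, with valid write ids for ACID file sinks
      }
    }

Bounding the loop with HiveConf.ConfVars.HIVE_TXN_MAX_RETRYSNAPSHOT_COUNT, as the diff does, rather than a hardcoded limit lets operators tune how many recompilations are attempted before the query fails.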




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Issue Time Tracking
-------------------

    Worklog Id:     (was: 479954)
    Time Spent: 7h 10m  (was: 7h)

> ValidTxnManager snapshot outdating causing partial reads in merge insert
> ------------------------------------------------------------------------
>
>                 Key: HIVE-23725
>                 URL: https://issues.apache.org/jira/browse/HIVE-23725
>             Project: Hive
>          Issue Type: Bug
>            Reporter: Peter Varga
>            Assignee: Peter Varga
>            Priority: Major
>              Labels: pull-request-available
>          Time Spent: 7h 10m
>  Remaining Estimate: 0h
>
> When the ValidTxnManager invalidates the snapshot during a merge insert and 
> starts to read committed transactions that were not committed when the query 
> was compiled, it can cause partial-read problems if a committed transaction 
> created a new partition in the source or target table.
> The solution should be to not only fix the snapshot but also to recompile the 
> query and acquire the locks again.
> You could construct an example like this (see the sketch below):
> 1. Open and compile transaction 1, which merge-inserts data from a 
> partitioned source table that has a few partitions.
> 2. Open, run and commit transaction 2, which inserts data into an old and a 
> new partition of the source table.
> 3. Open, run and commit transaction 3, which inserts data into the target 
> table of the merge statement; this retriggers snapshot generation in 
> transaction 1.
> 4. Run transaction 1: the snapshot is regenerated, and it reads partial data 
> from transaction 2, breaking the ACID properties.
> A different setup switches the transaction order:
> 1. Compile transaction 1, which inserts data into an old and a new partition 
> of the source table.
> 2. Compile transaction 2, which inserts data into the target table.
> 3. Compile transaction 3, which merge-inserts data from the source table 
> into the target table.
> 4. Run and commit transaction 1.
> 5. Run and commit transaction 2.
> 6. Run transaction 3: since transactions 1 and 2 are in its snapshot, 
> isValidTxnListState is triggered and we do a partial read of transaction 1 
> for the same reasons.
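To make the first scenario concrete, here is a hypothetical sketch in the style of the TestDbTxnManager2 changes above. It assumes an initialized Hive session and the ACID tables target and source partitioned by c; it is illustrative, not the PR's actual test code:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.DriverFactory;
    import org.apache.hadoop.hive.ql.IDriver;

    public class PartialReadRepro {
      // Sketch only: assumes an initialized SessionState and existing ACID
      // tables target/source partitioned by c, as in the tests above.
      static void scenario(HiveConf conf) throws Exception {
        IDriver txn1 = DriverFactory.newDriver(conf);
        IDriver txn2 = DriverFactory.newDriver(conf);
        IDriver txn3 = DriverFactory.newDriver(conf);

        // 1. txn 1 compiles the merge against the current snapshot
        txn1.compileAndRespond("merge into target t using source s on t.a = s.a "
            + "when not matched then insert values (s.a, s.b, s.c)");
        // 2. txn 2 commits into an old (c=2) and a new (c=3) partition of source
        txn2.run("insert into source values (5,5,2), (6,6,3)");
        // 3. txn 3 commits into target, forcing txn 1 to regenerate its snapshot
        txn3.run("insert into target values (3,3,2)");
        // 4. without recompilation, txn 1 now sees part of txn 2 (the c=2 rows)
        //    while missing the new c=3 partition: a partial read
        txn1.run();
      }
    }

With the fix, step 4 detects the outdated snapshot via isValidTxnListState, recompiles, and re-acquires locks, so the merge either sees all of transaction 2 or none of it.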



--
This message was sent by Atlassian Jira
(v8.3.4#803005)
