[ https://issues.apache.org/jira/browse/HIVE-20968?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16816630#comment-16816630 ]
Hive QA commented on HIVE-20968: -------------------------------- Here are the results of testing the latest attachment: https://issues.apache.org/jira/secure/attachment/12965742/HIVE-20968.02.patch {color:green}SUCCESS:{color} +1 due to 4 test(s) being added or modified. {color:red}ERROR:{color} -1 due to 62 failed/errored test(s), 15940 tests executed *Failed tests:* {noformat} org.apache.hadoop.hive.cli.TestCliDriver.testCliDriver[acid_stats4] (batchId=66) org.apache.hadoop.hive.cli.TestCliDriver.testCliDriver[alter_partition_format_loc] (batchId=58) org.apache.hadoop.hive.cli.TestCliDriver.testCliDriver[alter_table_location] (batchId=73) org.apache.hadoop.hive.cli.TestCliDriver.testCliDriver[alter_table_stats_status] (batchId=59) org.apache.hadoop.hive.cli.TestCliDriver.testCliDriver[schemeAuthority3] (batchId=83) org.apache.hadoop.hive.cli.TestMiniLlapCliDriver.testCliDriver[alter_table_location2] (batchId=158) org.apache.hadoop.hive.cli.TestMiniLlapCliDriver.testCliDriver[alter_table_location3] (batchId=154) org.apache.hadoop.hive.cli.TestNegativeCliDriver.testCliDriver[constraint_duplicate_name] (batchId=100) org.apache.hadoop.hive.cli.TestNegativeCliDriver.testCliDriver[create_not_acid] (batchId=101) org.apache.hadoop.hive.cli.TestNegativeCliDriver.testCliDriver[create_view_failure2] (batchId=100) org.apache.hadoop.hive.cli.TestNegativeCliDriver.testCliDriver[create_with_constraints_duplicate_name] (batchId=101) org.apache.hadoop.hive.cli.TestNegativeCliDriver.testCliDriver[create_with_fk_constraint] (batchId=101) org.apache.hadoop.hive.cli.TestNegativeCliDriver.testCliDriver[create_with_fk_pk_same_tab] (batchId=100) org.apache.hadoop.hive.cli.TestNegativeCliDriver.testCliDriver[create_with_fk_uk_same_tab] (batchId=101) org.apache.hadoop.hive.cli.TestNegativeCliDriver.testCliDriver[create_with_fk_wrong_ref2] (batchId=101) org.apache.hadoop.hive.cli.TestNegativeCliDriver.testCliDriver[create_with_fk_wrong_ref] (batchId=100) 
org.apache.hadoop.hive.cli.TestNegativeCliDriver.testCliDriver[druid_datasource2] (batchId=101) org.apache.hadoop.hive.cli.TestNegativeCliDriver.testCliDriver[insert_sorted] (batchId=100) org.apache.hadoop.hive.cli.TestNegativeCliDriver.testCliDriver[strict_managed_tables1] (batchId=101) org.apache.hadoop.hive.cli.TestNegativeCliDriver.testCliDriver[strict_managed_tables4] (batchId=101) org.apache.hadoop.hive.cli.TestNegativeCliDriver.testCliDriver[strict_managed_tables5] (batchId=101) org.apache.hadoop.hive.metastore.TestAcidTableSetup.testTransactionalValidation (batchId=239) org.apache.hadoop.hive.metastore.TestHiveMetaStoreWithEnvironmentContext.testEnvironmentContext (batchId=226) org.apache.hadoop.hive.metastore.TestRetryingHMSHandler.testRetryingHMSHandler (batchId=227) org.apache.hadoop.hive.metastore.client.TestAddPartitions.noSuchCatalog[Embedded] (batchId=221) org.apache.hadoop.hive.metastore.client.TestAddPartitions.noSuchCatalog[Remote] (batchId=221) org.apache.hadoop.hive.metastore.client.TestAddPartitions.testAddPartitionEmptyDb[Embedded] (batchId=221) org.apache.hadoop.hive.metastore.client.TestAddPartitions.testAddPartitionEmptyDb[Remote] (batchId=221) org.apache.hadoop.hive.metastore.client.TestAddPartitions.testAddPartitionEmptyTable[Embedded] (batchId=221) org.apache.hadoop.hive.metastore.client.TestAddPartitions.testAddPartitionEmptyTable[Remote] (batchId=221) org.apache.hadoop.hive.metastore.client.TestAddPartitions.testAddPartitionNonExistingDb[Embedded] (batchId=221) org.apache.hadoop.hive.metastore.client.TestAddPartitions.testAddPartitionNonExistingDb[Remote] (batchId=221) org.apache.hadoop.hive.metastore.client.TestAddPartitions.testAddPartitionNonExistingTable[Embedded] (batchId=221) org.apache.hadoop.hive.metastore.client.TestAddPartitions.testAddPartitionNonExistingTable[Remote] (batchId=221) org.apache.hadoop.hive.metastore.client.TestAddPartitions.testAddPartitionsNonExistingDb[Embedded] (batchId=221) 
org.apache.hadoop.hive.metastore.client.TestAddPartitions.testAddPartitionsNonExistingDb[Remote] (batchId=221) org.apache.hadoop.hive.metastore.client.TestAddPartitionsFromPartSpec.testAddPartitionSpecEmptyDB[Embedded] (batchId=223) org.apache.hadoop.hive.metastore.client.TestAddPartitionsFromPartSpec.testAddPartitionSpecEmptyDB[Remote] (batchId=223) org.apache.hadoop.hive.metastore.client.TestAddPartitionsFromPartSpec.testAddPartitionSpecEmptyTable[Embedded] (batchId=223) org.apache.hadoop.hive.metastore.client.TestAddPartitionsFromPartSpec.testAddPartitionSpecEmptyTable[Remote] (batchId=223) org.apache.hadoop.hive.metastore.client.TestAddPartitionsFromPartSpec.testAddPartitionSpecNonExistingDB[Embedded] (batchId=223) org.apache.hadoop.hive.metastore.client.TestAddPartitionsFromPartSpec.testAddPartitionSpecNonExistingDB[Remote] (batchId=223) org.apache.hadoop.hive.metastore.client.TestAddPartitionsFromPartSpec.testAddPartitionSpecNonExistingTable[Embedded] (batchId=223) org.apache.hadoop.hive.metastore.client.TestAddPartitionsFromPartSpec.testAddPartitionSpecNonExistingTable[Remote] (batchId=223) org.apache.hadoop.hive.metastore.client.TestAppendPartitions.testAppendPartitionBogusCatalog[Embedded] (batchId=223) org.apache.hadoop.hive.metastore.client.TestAppendPartitions.testAppendPartitionBogusCatalog[Remote] (batchId=223) org.apache.hadoop.hive.metastore.client.TestAppendPartitions.testAppendPartitionEmptyDB[Embedded] (batchId=223) org.apache.hadoop.hive.metastore.client.TestAppendPartitions.testAppendPartitionEmptyDB[Remote] (batchId=223) org.apache.hadoop.hive.metastore.client.TestAppendPartitions.testAppendPartitionEmptyTable[Embedded] (batchId=223) org.apache.hadoop.hive.metastore.client.TestAppendPartitions.testAppendPartitionEmptyTable[Remote] (batchId=223) org.apache.hadoop.hive.metastore.client.TestAppendPartitions.testAppendPartitionNonExistingDB[Embedded] (batchId=223) 
org.apache.hadoop.hive.metastore.client.TestAppendPartitions.testAppendPartitionNonExistingDB[Remote] (batchId=223) org.apache.hadoop.hive.metastore.client.TestAppendPartitions.testAppendPartitionNonExistingTable[Embedded] (batchId=223) org.apache.hadoop.hive.metastore.client.TestAppendPartitions.testAppendPartitionNonExistingTable[Remote] (batchId=223) org.apache.hadoop.hive.metastore.client.TestTablesCreateDropAlterTruncate.createTableInBogusCatalog[Embedded] (batchId=221) org.apache.hadoop.hive.metastore.client.TestTablesCreateDropAlterTruncate.createTableInBogusCatalog[Remote] (batchId=221) org.apache.hadoop.hive.ql.parse.TestReplicationWithTableMigrationEx.testConcurrentOpDuringBootStrapDumpCreateTableReplay (batchId=261) org.apache.hadoop.hive.ql.parse.TestReplicationWithTableMigrationEx.testConcurrentOpDuringBootStrapDumpInsertReplay (batchId=261) org.apache.hadoop.hive.ql.parse.TestReplicationWithTableMigrationEx.testIncLoadPenFlagPropAlterDB (batchId=261) org.apache.hadoop.hive.ql.parse.TestReplicationWithTableMigrationEx.testIncLoadPenFlagWithMoveOptimization (batchId=261) org.apache.hadoop.hive.ql.parse.TestStatsReplicationScenariosMigrationNoAutogather.testRetryFailure (batchId=255) org.apache.hive.hcatalog.api.TestHCatClient.testBasicDDLCommands (batchId=205) {noformat} Test results: https://builds.apache.org/job/PreCommit-HIVE-Build/16937/testReport Console output: https://builds.apache.org/job/PreCommit-HIVE-Build/16937/console Test logs: http://104.198.109.242/logs/PreCommit-HIVE-Build-16937/ Messages: {noformat} Executing org.apache.hive.ptest.execution.TestCheckPhase Executing org.apache.hive.ptest.execution.PrepPhase Executing org.apache.hive.ptest.execution.YetusPhase Executing org.apache.hive.ptest.execution.ExecutionPhase Executing org.apache.hive.ptest.execution.ReportingPhase Tests exited with: TestsFailedException: 62 tests failed {noformat} This message is automatically generated. 
ATTACHMENT ID: 12965742 - PreCommit-HIVE-Build > Support conversion of managed to external where location set was not owned by > hive > ---------------------------------------------------------------------------------- > > Key: HIVE-20968 > URL: https://issues.apache.org/jira/browse/HIVE-20968 > Project: Hive > Issue Type: Sub-task > Components: repl > Affects Versions: 4.0.0 > Reporter: mahesh kumar behera > Assignee: Sankar Hariappan > Priority: Major > Labels: DR, pull-request-available > Attachments: HIVE-20968.01.patch, HIVE-20968.02.patch > > Time Spent: 2h 20m > Remaining Estimate: 0h > > As per migration rule, if a location is outside the default managed table > directory and the location is not owned by "hive" user, then it should be > converted to external table after upgrade. > So, the same rule is applicable for Hive replication where the data of > source managed table is residing outside the default warehouse directory and > is not owned by "hive" user. > During this conversion, the path should be preserved in target as well so > that failover works seamlessly. > # If the table location is outside hive warehouse and is not owned by hive, > then the table at target will be converted to external table. But the > location cannot be retained as-is; it will be recreated relative to hive external > warehouse directory. > # As the table is not an external table at source, only those data which > are added using events will be replicated. > # The ownership of the location will be stored in the create table event and > will be used to compare it with strict.managed.tables.migration.owner to > decide if the flag in replication scope can be set. This flag is used to > convert the managed table to external table at target. > Some of the scenarios need to be blocked if the database is set for > replication from a cluster with non-strict managed table setting to strict > managed table. > 1. 
Block alter table / partition set location for database with source of > replication set for managed tables > 2. If user manually changes the ownership of the location, hive replication > may go to a non-recoverable state. > 3. Block add partition if the location ownership is different from the table > location for managed tables. > 4. User needs to set strict.managed.tables.migration.owner along with dump > command (defaults to hive user). This value will be used during dump to decide > the ownership which will be used during load to decide the table type. The > location owner information can be stored in the events during create table. > The flag can be stored in replication spec. Check other such configs used in > upgrade tool. > 5. Block conversion from managed to external and vice versa. Pass some flag > in upgrade flow to allow this conversion during upgrade flow. -- This message was sent by Atlassian JIRA (v7.6.3#76005)