[ https://issues.apache.org/jira/browse/HIVE-22489?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16999613#comment-16999613 ]

Hive QA commented on HIVE-22489:
--------------------------------



Here are the results of testing the latest attachment:
https://issues.apache.org/jira/secure/attachment/12989129/HIVE-22489.5.patch

{color:green}SUCCESS:{color} +1 due to 7 test(s) being added or modified.

{color:red}ERROR:{color} -1 due to 154 failed/errored test(s), 17781 tests executed
*Failed tests:*
{noformat}
org.apache.hadoop.hive.cli.TestCliDriver.testCliDriver[hashjoin] (batchId=3)
org.apache.hadoop.hive.cli.TestEncryptedHDFSCliDriver.testCliDriver[encryption_join_with_different_encryption_keys] (batchId=194)
org.apache.hadoop.hive.cli.TestHBaseCliDriver.testCliDriver[hbase_bulk] (batchId=111)
org.apache.hadoop.hive.cli.TestMiniSparkOnYarnCliDriver.testCliDriver[spark_use_ts_stats_for_mapjoin] (batchId=196)
org.apache.hadoop.hive.cli.TestMiniSparkOnYarnCliDriver.testCliDriver[spark_vectorized_dynamic_partition_pruning] (batchId=195)
org.apache.hadoop.hive.cli.TestMiniSparkOnYarnCliDriver.testCliDriver[vector_inner_join] (batchId=197)
org.apache.hadoop.hive.cli.TestMiniSparkOnYarnCliDriver.testCliDriver[vector_outer_join0] (batchId=197)
org.apache.hadoop.hive.cli.TestMiniSparkOnYarnCliDriver.testCliDriver[vector_outer_join1] (batchId=196)
org.apache.hadoop.hive.cli.TestMiniSparkOnYarnCliDriver.testCliDriver[vector_outer_join2] (batchId=195)
org.apache.hadoop.hive.cli.TestMiniSparkOnYarnCliDriver.testCliDriver[vector_outer_join3] (batchId=196)
org.apache.hadoop.hive.cli.TestMiniSparkOnYarnCliDriver.testCliDriver[vector_outer_join4] (batchId=198)
org.apache.hadoop.hive.cli.TestMiniSparkOnYarnCliDriver.testCliDriver[vector_outer_join5] (batchId=198)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join10] (batchId=130)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join11] (batchId=118)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join12] (batchId=125)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join13] (batchId=151)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join14] (batchId=120)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join15] (batchId=121)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join17] (batchId=152)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join19] (batchId=144)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join1] (batchId=150)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join20] (batchId=155)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join21] (batchId=152)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join22] (batchId=139)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join24] (batchId=149)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join26] (batchId=120)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join29] (batchId=139)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join2] (batchId=143)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join30] (batchId=128)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join31] (batchId=135)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join3] (batchId=152)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join4] (batchId=146)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join5] (batchId=148)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join8] (batchId=153)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join9] (batchId=149)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join_filters] (batchId=141)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join_nulls] (batchId=145)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join_stats2] (batchId=154)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_join_stats] (batchId=136)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[auto_sortmerge_join_13] (batchId=143)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[bucket_map_join_tez1] (batchId=153)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[bucket_map_join_tez2] (batchId=119)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[identity_project_remove_skip] (batchId=137)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[join25] (batchId=117)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[join26] (batchId=123)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[join27] (batchId=133)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[join28] (batchId=153)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[join29] (batchId=134)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[join30] (batchId=150)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[join31] (batchId=155)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[join32] (batchId=122)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[join32_lessSize] (batchId=117)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[join33] (batchId=121)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[join36] (batchId=154)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[join37] (batchId=141)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[join38] (batchId=151)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[join39] (batchId=139)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[join40] (batchId=138)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[join_star] (batchId=127)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[mapjoin_distinct] (batchId=140)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[mapjoin_filter_on_outerjoin] (batchId=142)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[mapjoin_mapjoin] (batchId=137)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[mapjoin_memcheck] (batchId=133)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[mapjoin_subquery2] (batchId=117)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[mapjoin_subquery] (batchId=137)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[mapjoin_test_outer] (batchId=114)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[runtime_skewjoin_mapjoin_spark] (batchId=139)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[subquery_in] (batchId=143)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[subquery_multiinsert] (batchId=152)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[subquery_notin] (batchId=146)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[subquery_scalar] (batchId=132)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[union22] (batchId=120)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[union34] (batchId=119)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[union_remove_12] (batchId=134)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[union_remove_13] (batchId=154)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[union_remove_14] (batchId=119)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[vector_decimal_mapjoin] (batchId=140)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[vector_left_outer_join] (batchId=124)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[vector_mapjoin_reduce] (batchId=151)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[vectorized_mapjoin] (batchId=148)
org.apache.hadoop.hive.cli.TestSparkCliDriver.testCliDriver[vectorized_nested_mapjoin] (batchId=122)
org.apache.hadoop.hive.cli.TestSparkPerfCliDriver.testCliDriver[query44] (batchId=303)
org.apache.hadoop.hive.ql.exec.spark.TestSmallTableCacheEviction.testSmallTableEvictionIfNewQueryIsExecuted (batchId=278)
org.apache.hadoop.hive.ql.exec.tez.TestVectorMapJoinFastHashTable.checkFast2estimations (batchId=347)
org.apache.hadoop.hive.ql.exec.vector.TestVectorSerDeRow.testVectorBinarySortableDeserializeRow (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.TestVectorSerDeRow.testVectorBinarySortableSerializeRow (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.TestMapJoinOperator.testLong2 (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.TestMapJoinOperator.testLong3 (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.TestMapJoinOperator.testLong3_NoRegularKeys (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.TestMapJoinOperator.testLong4 (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.TestMapJoinOperator.testLong5 (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.TestMapJoinOperator.testLong6 (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.TestMapJoinOperator.testMultiKey0 (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.TestMapJoinOperator.testMultiKey1 (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.TestMapJoinOperator.testMultiKey2 (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.TestMapJoinOperator.testMultiKey3 (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.TestMapJoinOperator.testString0 (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.TestMapJoinOperator.testString1 (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.TestMapJoinOperator.testString2 (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastLongHashMap.testExpand (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastLongHashMap.testFullMap (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastLongHashMap.testGetNonExistent (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastLongHashMap.testLargeAndExpand (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastLongHashMap.testMultipleKeysMultipleValue (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastLongHashMap.testMultipleKeysSingleValue (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastLongHashMap.testOneKey (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastLongHashMapNonMatched.testExpand (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastLongHashMapNonMatched.testLargeAndExpand (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastLongHashMapNonMatched.testMultipleKeysMultipleValue (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastLongHashMapNonMatched.testMultipleKeysSingleValue (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastLongHashMapNonMatched.testOneKey (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastLongHashMultiSet.testExpand (batchId=346)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastLongHashMultiSet.testFullMap (batchId=346)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastLongHashMultiSet.testGetNonExistent (batchId=346)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastLongHashMultiSet.testLargeAndExpand (batchId=346)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastLongHashMultiSet.testMultipleKeysMultipleValue (batchId=346)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastLongHashMultiSet.testMultipleKeysSingleValue (batchId=346)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastLongHashMultiSet.testOneKey (batchId=346)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastRowHashMap.testBigIntRows (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastRowHashMap.testBigIntRowsClipped (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastRowHashMap.testBigIntRowsClippedExact (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastRowHashMap.testBigIntRowsExact (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastRowHashMap.testIntRows (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastRowHashMap.testIntRowsClipped (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastRowHashMap.testIntRowsClippedExact (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastRowHashMap.testIntRowsExact (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastRowHashMap.testStringRows (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastRowHashMap.testStringRowsClipped (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastRowHashMap.testStringRowsClippedExact (batchId=345)
org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.TestVectorMapJoinFastRowHashMap.testStringRowsExact (batchId=345)
org.apache.hive.beeline.cli.TestHiveCli.testCmd (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testCommentStripping (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testDatabaseOptions (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testErrOutput (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testHelp (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testInValidCmd (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testInvalidDatabaseOptions (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testInvalidOptions (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testInvalidOptions2 (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testNoErrorDB (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testSetHeaderValue (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testSetPromptValue (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testSourceCmd (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testSourceCmd2 (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testSourceCmd3 (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testSourceCmd4 (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testSqlFromCmd (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testSqlFromCmdWithDBName (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testUseCurrentDB1 (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testUseCurrentDB2 (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testUseCurrentDB3 (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testUseInvalidDB (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testVariables (batchId=206)
org.apache.hive.beeline.cli.TestHiveCli.testVariablesForSource (batchId=206)
{noformat}

Test results: https://builds.apache.org/job/PreCommit-HIVE-Build/19978/testReport
Console output: https://builds.apache.org/job/PreCommit-HIVE-Build/19978/console
Test logs: http://104.198.109.242/logs/PreCommit-HIVE-Build-19978/

Messages:
{noformat}
Executing org.apache.hive.ptest.execution.TestCheckPhase
Executing org.apache.hive.ptest.execution.PrepPhase
Executing org.apache.hive.ptest.execution.YetusPhase
Executing org.apache.hive.ptest.execution.ExecutionPhase
Executing org.apache.hive.ptest.execution.ReportingPhase
Tests exited with: TestsFailedException: 154 tests failed
{noformat}

This message is automatically generated.

ATTACHMENT ID: 12989129 - PreCommit-HIVE-Build

>  Reduce Sink operator should order nulls by parameter
> -----------------------------------------------------
>
>                 Key: HIVE-22489
>                 URL: https://issues.apache.org/jira/browse/HIVE-22489
>             Project: Hive
>          Issue Type: Bug
>          Components: Query Planning
>            Reporter: Krisztian Kasa
>            Assignee: Krisztian Kasa
>            Priority: Major
>         Attachments: HIVE-22489.1.patch, HIVE-22489.2.patch, HIVE-22489.3.patch, HIVE-22489.3.patch, HIVE-22489.4.patch, HIVE-22489.5.patch
>
>
> When the property hive.default.nulls.last is set to true and no null ordering is
> explicitly specified in the ORDER BY clause of the query, the null ordering should
> be NULLS LAST.
> But some of the Reduce Sink operators still order nulls first.
> {code}
> SET hive.default.nulls.last=true;
> EXPLAIN EXTENDED
> SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5;
> {code}
> {code}
> PREHOOK: query: EXPLAIN EXTENDED
> SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key
> PREHOOK: type: QUERY
> PREHOOK: Input: default@src
> #### A masked pattern was here ####
> POSTHOOK: query: EXPLAIN EXTENDED
> SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key
> POSTHOOK: type: QUERY
> POSTHOOK: Input: default@src
> #### A masked pattern was here ####
> OPTIMIZED SQL: SELECT `t0`.`key`, `t2`.`value`
> FROM (SELECT `key`
> FROM `default`.`src`
> WHERE `key` IS NOT NULL) AS `t0`
> INNER JOIN (SELECT `key`, `value`
> FROM `default`.`src`
> WHERE `key` IS NOT NULL) AS `t2` ON `t0`.`key` = `t2`.`key`
> ORDER BY `t0`.`key`
> STAGE DEPENDENCIES:
>   Stage-1 is a root stage
>   Stage-0 depends on stages: Stage-1
> STAGE PLANS:
>   Stage: Stage-1
>     Tez
> #### A masked pattern was here ####
>       Edges:
>         Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
>         Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
> #### A masked pattern was here ####
>       Vertices:
>         Map 1 
>             Map Operator Tree:
>                 TableScan
>                   alias: src1
>                   filterExpr: key is not null (type: boolean)
>                   Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
>                   GatherStats: false
>                   Filter Operator
>                     isSamplingPred: false
>                     predicate: key is not null (type: boolean)
>                     Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
>                     Select Operator
>                       expressions: key (type: string)
>                       outputColumnNames: _col0
>                       Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
>                       Reduce Output Operator
>                         key expressions: _col0 (type: string)
>                         null sort order: a
>                         sort order: +
>                         Map-reduce partition columns: _col0 (type: string)
>                         Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
>                         tag: 0
>                         auto parallelism: true
>             Execution mode: vectorized, llap
>             LLAP IO: no inputs
>             Path -> Alias:
> #### A masked pattern was here ####
>             Path -> Partition:
> #### A masked pattern was here ####
>                 Partition
>                   base file name: src
>                   input format: org.apache.hadoop.mapred.TextInputFormat
>                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
>                   properties:
>                     COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
>                     bucket_count -1
>                     bucketing_version 2
>                     column.name.delimiter ,
>                     columns key,value
>                     columns.comments 'default','default'
>                     columns.types string:string
> #### A masked pattern was here ####
>                     name default.src
>                     numFiles 1
>                     numRows 500
>                     rawDataSize 5312
>                     serialization.ddl struct src { string key, string value}
>                     serialization.format 1
>                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
>                     totalSize 5812
> #### A masked pattern was here ####
>                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
>                 
>                     input format: org.apache.hadoop.mapred.TextInputFormat
>                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
>                     properties:
>                       COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
>                       bucket_count -1
>                       bucketing_version 2
>                       column.name.delimiter ,
>                       columns key,value
>                       columns.comments 'default','default'
>                       columns.types string:string
> #### A masked pattern was here ####
>                       name default.src
>                       numFiles 1
>                       numRows 500
>                       rawDataSize 5312
>                       serialization.ddl struct src { string key, string value}
>                       serialization.format 1
>                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
>                       totalSize 5812
> #### A masked pattern was here ####
>                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
>                     name: default.src
>                   name: default.src
>             Truncated Path -> Alias:
>               /src [src1]
>         Map 4 
>             Map Operator Tree:
>                 TableScan
>                   alias: src2
>                   filterExpr: key is not null (type: boolean)
>                   Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
>                   GatherStats: false
>                   Filter Operator
>                     isSamplingPred: false
>                     predicate: key is not null (type: boolean)
>                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
>                     Select Operator
>                       expressions: key (type: string), value (type: string)
>                       outputColumnNames: _col0, _col1
>                       Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
>                       Reduce Output Operator
>                         key expressions: _col0 (type: string)
>                         null sort order: a
>                         sort order: +
>                         Map-reduce partition columns: _col0 (type: string)
>                         Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
>                         tag: 1
>                         value expressions: _col1 (type: string)
>                         auto parallelism: true
>             Execution mode: vectorized, llap
>             LLAP IO: no inputs
>             Path -> Alias:
> #### A masked pattern was here ####
>             Path -> Partition:
> #### A masked pattern was here ####
>                 Partition
>                   base file name: src
>                   input format: org.apache.hadoop.mapred.TextInputFormat
>                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
>                   properties:
>                     COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
>                     bucket_count -1
>                     bucketing_version 2
>                     column.name.delimiter ,
>                     columns key,value
>                     columns.comments 'default','default'
>                     columns.types string:string
> #### A masked pattern was here ####
>                     name default.src
>                     numFiles 1
>                     numRows 500
>                     rawDataSize 5312
>                     serialization.ddl struct src { string key, string value}
>                     serialization.format 1
>                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
>                     totalSize 5812
> #### A masked pattern was here ####
>                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
>                 
>                     input format: org.apache.hadoop.mapred.TextInputFormat
>                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
>                     properties:
>                       COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
>                       bucket_count -1
>                       bucketing_version 2
>                       column.name.delimiter ,
>                       columns key,value
>                       columns.comments 'default','default'
>                       columns.types string:string
> #### A masked pattern was here ####
>                       name default.src
>                       numFiles 1
>                       numRows 500
>                       rawDataSize 5312
>                       serialization.ddl struct src { string key, string value}
>                       serialization.format 1
>                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
>                       totalSize 5812
> #### A masked pattern was here ####
>                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
>                     name: default.src
>                   name: default.src
>             Truncated Path -> Alias:
>               /src [src2]
>         Reducer 2 
>             Execution mode: llap
>             Needs Tagging: false
>             Reduce Operator Tree:
>               Merge Join Operator
>                 condition map:
>                      Inner Join 0 to 1
>                 keys:
>                   0 _col0 (type: string)
>                   1 _col0 (type: string)
>                 outputColumnNames: _col0, _col2
>                 Position of Big Table: 1
>                 Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
>                 Select Operator
>                   expressions: _col0 (type: string), _col2 (type: string)
>                   outputColumnNames: _col0, _col1
>                   Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
>                   Reduce Output Operator
>                     key expressions: _col0 (type: string)
>                     null sort order: z
>                     sort order: +
>                     Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
>                     tag: -1
>                     value expressions: _col1 (type: string)
>                     auto parallelism: false
>         Reducer 3 
>             Execution mode: vectorized, llap
>             Needs Tagging: false
>             Reduce Operator Tree:
>               Select Operator
>                 expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
>                 outputColumnNames: _col0, _col1
>                 Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
>                 File Output Operator
>                   compressed: false
>                   GlobalTableId: 0
> #### A masked pattern was here ####
>                   NumFilesPerFileSink: 1
>                   Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
> #### A masked pattern was here ####
>                   table:
>                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
>                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
>                       properties:
>                         columns _col0,_col1
>                         columns.types string:string
>                         escape.delim \
>                         hive.serialization.extend.additional.nesting.levels true
>                         serialization.escape.crlf true
>                         serialization.format 1
>                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
>                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
>                   TotalFiles: 1
>                   GatherStats: false
>                   MultiFileSpray: false
>   Stage: Stage-0
>     Fetch Operator
>       limit: -1
>       Processor Tree:
>         ListSink
> {code}
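
In the plan above, the join-stage Reduce Output Operators carry {{null sort order: a}} (nulls first), while only the final order-by sink carries {{z}} (nulls last). As a minimal sketch of the intended semantics, assuming a hypothetical toy table {{t}} (the table and its rows are illustrative, not part of the original report):

{code}
-- Hypothetical toy table, for illustration only.
CREATE TABLE t (k STRING);
INSERT INTO t VALUES ('b'), (NULL), ('a');

SET hive.default.nulls.last=true;

-- No explicit NULLS FIRST/LAST is given, so the configured default
-- should apply and NULLs should sort last.
SELECT k FROM t ORDER BY k;
-- expected order: a, b, NULL
{code}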



--
This message was sent by Atlassian Jira
(v8.3.4#803005)
