[ https://issues.apache.org/jira/browse/HIVE-16780?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

liyunzhang_intel updated HIVE-16780:
------------------------------------
    Description: 
script.q
{code}
set hive.optimize.ppd=true;
set hive.ppd.remove.duplicatefilters=true;
set hive.spark.dynamic.partition.pruning=true;
set hive.optimize.metadataonly=false;
set hive.optimize.index.filter=true;
set hive.strict.checks.cartesian.product=false;
set hive.spark.dynamic.partition.pruning=true;

-- multiple sources, single key
select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
{code}

If "hive.optimize.index.filter" is disabled, the case passes; otherwise the query always hangs in the first job. Exception:
{code}
17/05/27 23:39:45 DEBUG Executor task launch worker-0 PerfLogger: </PERFLOG method=SparkInitializeOperators start=1495899585574 end=1495899585933 duration=359 from=org.apache.hadoop.hive.ql.exec.spark.SparkRecordHandler>
17/05/27 23:39:45 INFO Executor task launch worker-0 Utilities: PLAN PATH = hdfs://bdpe41:8020/tmp/hive/root/029a2d8a-c6e5-4ea9-adea-ef8fbea3cde2/hive_2017-05-27_23-39-06_464_5915518562441677640-1/-mr-10007/617d9dd6-9f9a-4786-8131-a7b98e8abc3e/map.xml
17/05/27 23:39:45 DEBUG Executor task launch worker-0 Utilities: Found plan in cache for name: map.xml
17/05/27 23:39:45 DEBUG Executor task launch worker-0 DFSClient: Connecting to datanode 10.239.47.162:50010
17/05/27 23:39:45 DEBUG Executor task launch worker-0 MapOperator: Processing alias(es) srcpart_hour for file hdfs://bdpe41:8020/user/hive/warehouse/srcpart_hour/000008_0
17/05/27 23:39:45 DEBUG Executor task launch worker-0 ObjectCache: Creating root_20170527233906_ac2934e1-2e58-4116-9f0d-35dee302d689_DynamicValueRegistry
17/05/27 23:39:45 ERROR Executor task launch worker-0 SparkMapRecordHandler: Error processing row: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row {"hr":"11","hour":"11"}
org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row {"hr":"11","hour":"11"}
     at org.apache.hadoop.hive.ql.exec.MapOperator.process(MapOperator.java:562)
     at org.apache.hadoop.hive.ql.exec.spark.SparkMapRecordHandler.processRow(SparkMapRecordHandler.java:136)
     at org.apache.hadoop.hive.ql.exec.spark.HiveMapFunctionResultList.processNextRecord(HiveMapFunctionResultList.java:48)
     at org.apache.hadoop.hive.ql.exec.spark.HiveMapFunctionResultList.processNextRecord(HiveMapFunctionResultList.java:27)
     at org.apache.hadoop.hive.ql.exec.spark.HiveBaseFunctionResultList.hasNext(HiveBaseFunctionResultList.java:85)
     at scala.collection.convert.Wrappers$JIteratorWrapper.hasNext(Wrappers.scala:42)
     at scala.collection.Iterator$class.foreach(Iterator.scala:893)
     at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
     at org.apache.spark.rdd.AsyncRDDActions$$anonfun$foreachAsync$1$$anonfun$apply$12.apply(AsyncRDDActions.scala:127)
     at org.apache.spark.rdd.AsyncRDDActions$$anonfun$foreachAsync$1$$anonfun$apply$12.apply(AsyncRDDActions.scala:127)
     at org.apache.spark.SparkContext$$anonfun$33.apply(SparkContext.scala:1974)
     at org.apache.spark.SparkContext$$anonfun$33.apply(SparkContext.scala:1974)
     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
     at org.apache.spark.scheduler.Task.run(Task.scala:85)
     at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
     at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.IllegalStateException: Failed to retrieve dynamic value for RS_7_srcpart__col3_min
     at org.apache.hadoop.hive.ql.plan.DynamicValue.getValue(DynamicValue.java:126)
     at org.apache.hadoop.hive.ql.plan.DynamicValue.getWritableValue(DynamicValue.java:101)
     at org.apache.hadoop.hive.ql.exec.ExprNodeDynamicValueEvaluator._evaluate(ExprNodeDynamicValueEvaluator.java:51)
     at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:80)
     at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator$DeferredExprObject.get(ExprNodeGenericFuncEvaluator.java:88)
     at org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan.evaluate(GenericUDFOPEqualOrGreaterThan.java:108)
     at org.apache.hadoop.hive.ql.udf.generic.GenericUDFBetween.evaluate(GenericUDFBetween.java:57)
     at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator._evaluate(ExprNodeGenericFuncEvaluator.java:187)
     at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:80)
     at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator$DeferredExprObject.get(ExprNodeGenericFuncEvaluator.java:88)
     at org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd.evaluate(GenericUDFOPAnd.java:63)
     at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator._evaluate(ExprNodeGenericFuncEvaluator.java:187)
     at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:80)
     at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator$DeferredExprObject.get(ExprNodeGenericFuncEvaluator.java:88)
     at org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd.evaluate(GenericUDFOPAnd.java:63)
     at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator._evaluate(ExprNodeGenericFuncEvaluator.java:187)
     at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:80)
     at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluatorHead._evaluate(ExprNodeEvaluatorHead.java:44)
     at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:80)
     at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:68)
     at org.apache.hadoop.hive.ql.exec.FilterOperator.process(FilterOperator.java:112)
     at org.apache.hadoop.hive.ql.exec.Operator.forward(Operator.java:897)
     at org.apache.hadoop.hive.ql.exec.TableScanOperator.process(TableScanOperator.java:130)
     at org.apache.hadoop.hive.ql.exec.MapOperator$MapOpCtx.forward(MapOperator.java:148)
     at org.apache.hadoop.hive.ql.exec.MapOperator.process(MapOperator.java:547)
     ... 17 more
Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: java.lang.NullPointerException
     at org.apache.hadoop.hive.ql.exec.mr.ObjectCache.retrieve(ObjectCache.java:62)
     at org.apache.hadoop.hive.ql.exec.mr.ObjectCache.retrieve(ObjectCache.java:51)
     at org.apache.hadoop.hive.ql.exec.ObjectCacheWrapper.retrieve(ObjectCacheWrapper.java:40)
     at org.apache.hadoop.hive.ql.plan.DynamicValue.getValue(DynamicValue.java:119)
     ... 41 more
Caused by: java.lang.NullPointerException
     at org.apache.hadoop.hive.ql.exec.mr.ObjectCache.retrieve(ObjectCache.java:60)
     ... 44 more
17/05/27 23:39:45 ERROR Executor task launch worker-0 Executor: Exception in task 1.0 in stage 0.0 (TID 1)
java.lang.RuntimeException: Error processing row: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row {"hr":"11","hour":"11"}
     at org.apache.hadoop.hive.ql.exec.spark.SparkMapRecordHandler.processRow(SparkMapRecordHandler.java:149)
     at org.apache.hadoop.hive.ql.exec.spark.HiveMapFunctionResultList.processNextRecord(HiveMapFunctionResultList.java:48)
     at org.apache.hadoop.hive.ql.exec.spark.HiveMapFunctionResultList.processNextRecord(HiveMapFunctionResultList.java:27)
     at org.apache.hadoop.hive.ql.exec.spark.HiveBaseFunctionResultList.hasNext(HiveBaseFunctionResultList.java:85)
     at scala.collection.convert.Wrappers$JIteratorWrapper.hasNext(Wrappers.scala:42)
     at scala.collection.Iterator$class.foreach(Iterator.scala:893)
     at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
     at org.apache.spark.rdd.AsyncRDDActions$$anonfun$foreachAsync$1$$anonfun$apply$12.apply(AsyncRDDActions.scala:127)
     at org.apache.spark.rdd.AsyncRDDActions$$anonfun$foreachAsync$1$$anonfun$apply$12.apply(AsyncRDDActions.scala:127)
     at org.apache.spark.SparkContext$$anonfun$33.apply(SparkContext.scala:1974)
     at org.apache.spark.SparkContext$$anonfun$33.apply(SparkContext.scala:1974)
     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
     at org.apache.spark.scheduler.Task.run(Task.scala:85)
     at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
     at java.lang.Thread.run(Thread.java:745)
Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row {"hr":"11","hour":"11"}
     at org.apache.hadoop.hive.ql.exec.MapOperator.process(MapOperator.java:562)
     at org.apache.hadoop.hive.ql.exec.spark.SparkMapRecordHandler.processRow(SparkMapRecordHandler.java:136)
     ... 16 more
Caused by: java.lang.IllegalStateException: Failed to retrieve dynamic value for RS_7_srcpart__col3_min
     at org.apache.hadoop.hive.ql.plan.DynamicValue.getValue(DynamicValue.java:126)
     at org.apache.hadoop.hive.ql.plan.DynamicValue.getWritableValue(DynamicValue.java:101)
     at org.apache.hadoop.hive.ql.exec.ExprNodeDynamicValueEvaluator._evaluate(ExprNodeDynamicValueEvaluator.java:51)
     at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:80)
     at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator$DeferredExprObject.get(ExprNodeGenericFuncEvaluator.java:88)
     at org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan.evaluate(GenericUDFOPEqualOrGreaterThan.java:108)
     at org.apache.hadoop.hive.ql.udf.generic.GenericUDFBetween.evaluate(GenericUDFBetween.java:57)
     at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator._evaluate(ExprNodeGenericFuncEvaluator.java:187)
     at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:80)
     at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator$DeferredExprObject.get(ExprNodeGenericFuncEvaluator.java:88)
     at org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd.evaluate(GenericUDFOPAnd.java:63)
     at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator._evaluate(ExprNodeGenericFuncEvaluator.java:187)
     at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:80)
     at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator$DeferredExprObject.get(ExprNodeGenericFuncEvaluator.java:88)
     at org.apache.hadoop.hive
{code} 
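
Reading the trace bottom-up: FilterOperator evaluates the dynamic min/max predicate (BETWEEN ... DynamicValue RS_7_srcpart__col3_min), DynamicValue.getValue asks the object cache for the DynamicValueRegistry, and the one-arg org.apache.hadoop.hive.ql.exec.mr.ObjectCache.retrieve(key) at line 51 apparently delegates to the two-arg overload with a null loader, which then dereferences it at ObjectCache.java:60. A minimal sketch of that retrieve-or-compute shape (hypothetical SimpleObjectCache, not Hive's actual code) that reproduces the same failure:
{code}
import java.util.concurrent.Callable;

// Hypothetical stand-in (not Hive code) for the retrieve-or-compute cache in the trace.
class SimpleObjectCache {

    // One-arg overload delegating with a null loader, mirroring the
    // retrieve(ObjectCache.java:51) -> retrieve(ObjectCache.java:62) frames above.
    public <T> T retrieve(String key) throws Exception {
        return retrieve(key, null);
    }

    // With nothing cached and a null loader, loader.call() dereferences null:
    // the same shape as "Caused by: java.lang.NullPointerException at
    // org.apache.hadoop.hive.ql.exec.mr.ObjectCache.retrieve(ObjectCache.java:60)".
    public <T> T retrieve(String key, Callable<T> loader) throws Exception {
        System.out.println("Creating " + key); // cf. the DEBUG "ObjectCache: Creating ..." line
        return loader.call();                  // NPE when loader == null
    }

    public static void main(String[] args) throws Exception {
        new SimpleObjectCache().retrieve("DynamicValueRegistry"); // throws NullPointerException
    }
}
{code}
If that reading is right, nothing registered a DynamicValueRegistry on the Spark executor before the map task evaluated its filter, which is presumably why the behavior changes with hive.optimize.index.filter.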

  was:
script.q
{code}
set hive.optimize.ppd=true;
set hive.ppd.remove.duplicatefilters=true;
set hive.spark.dynamic.partition.pruning=true;
set hive.optimize.metadataonly=false;
set hive.optimize.index.filter=true;
set hive.strict.checks.cartesian.product=false;
set hive.spark.dynamic.partition.pruning=true;

-- multiple sources, single key
select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
{code}

Exception:
{code}
job failed with java.io.FileNotFoundException: File hdfs://bdpe41:8020/tmp/hive/root/de80d82a-b910-4b87-940c-6be3ea37ba25/hive_2017-05-27_14-55-30_114_8497388836256415979-1/-mr-10004/1/5 does not exist.
FAILED: Execution Error, return code 3 from org.apache.hadoop.hive.ql.exec.spark.SparkTask. java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: java.io.FileNotFoundException: File hdfs://bdpe41:8020/tmp/hive/root/de80d82a-b910-4b87-940c-6be3ea37ba25/hive_2017-05-27_14-55-30_114_8497388836256415979-1/-mr-10004/1/5 does not exist.
    at org.apache.hadoop.hive.ql.io.HiveInputFormat.init(HiveInputFormat.java:404)
    at org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.getSplits(CombineHiveInputFormat.java:498)
    at org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:200)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:248)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:246)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:246)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:248)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:246)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:246)
    at org.apache.spark.ShuffleDependency.<init>(Dependency.scala:91)
    at org.apache.spark.rdd.ShuffledRDD.getDependencies(ShuffledRDD.scala:91)
    at org.apache.spark.rdd.RDD$$anonfun$dependencies$2.apply(RDD.scala:235)
    at org.apache.spark.rdd.RDD$$anonfun$dependencies$2.apply(RDD.scala:233)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.dependencies(RDD.scala:233)
    at org.apache.hadoop.hive.ql.exec.spark.SparkUtilities.rddToString(SparkUtilities.java:144)
    at org.apache.hadoop.hive.ql.exec.spark.SparkUtilities.rddToString(SparkUtilities.java:149)
    at org.apache.hadoop.hive.ql.exec.spark.SparkUtilities.rddToString(SparkUtilities.java:149)
    at org.apache.hadoop.hive.ql.exec.spark.SparkUtilities.rddToString(SparkUtilities.java:149)
    at org.apache.hadoop.hive.ql.exec.spark.SparkUtilities.rddGraphToString(SparkUtilities.java:134)
    at org.apache.hadoop.hive.ql.exec.spark.SparkPlan.generateGraph(SparkPlan.java:93)
    at org.apache.hadoop.hive.ql.exec.spark.RemoteHiveSparkClient$JobStatusJob.call(RemoteHiveSparkClient.java:349)
    at org.apache.hive.spark.client.RemoteDriver$JobWrapper.call(RemoteDriver.java:358)
    at org.apache.hive.spark.client.RemoteDriver$JobWrapper.call(RemoteDriver.java:323)
    at java.util.concurrent.FutureTask.run(FutureTask.java:266)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: java.io.FileNotFoundException: File hdfs://bdpe41:8020/tmp/hive/root/de80d82a-b910-4b87-940c-6be3ea37ba25/hive_2017-05-27_14-55-30_114_8497388836256415979-1/-mr-10004/1/5 does not exist.
    at org.apache.hadoop.hive.ql.exec.spark.SparkDynamicPartitionPruner.processFiles(SparkDynamicPartitionPruner.java:147)
    at org.apache.hadoop.hive.ql.exec.spark.SparkDynamicPartitionPruner.prune(SparkDynamicPartitionPruner.java:76)
    at org.apache.hadoop.hive.ql.io.HiveInputFormat.init(HiveInputFormat.java:402)
    ... 30 more
Caused by: java.io.FileNotFoundException: File hdfs://bdpe41:8020/tmp/hive/root/de80d82a-b910-4b87-940c-6be3ea37ba25/hive_2017-05-27_14-55-30_114_8497388836256415979-1/-mr-10004/1/5 does not exist.
    at org.apache.hadoop.hdfs.DistributedFileSystem.listStatusInternal(DistributedFileSystem.java:795)
    at org.apache.hadoop.hdfs.DistributedFileSystem.access$700(DistributedFileSystem.java:106)
    at org.apache.hadoop.hdfs.DistributedFileSystem$18.doCall(DistributedFileSystem.java:853)
    at org.apache.hadoop.hdfs.DistributedFileSystem$18.doCall(DistributedFileSystem.java:849)
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
    at org.apache.hadoop.hdfs.DistributedFileSystem.listStatus(DistributedFileSystem.java:860)
    at org.apache.hadoop.hive.ql.exec.spark.SparkDynamicPartitionPruner.processFiles(SparkDynamicPartitionPruner.java:119)
    ... 32 more

{code}
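
For context, the earlier description above captured a different symptom of the same test: SparkDynamicPartitionPruner.processFiles fails while listing the pruning-sink output under the scratch directory. A minimal sketch of that listing step, assuming only Hadoop's FileSystem API (hypothetical ListPruningOutput class, path passed as an argument rather than the real -mr-10004 temp dir):
{code}
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper (not Hive code): list a pruning-output directory on HDFS.
public class ListPruningOutput {
    public static void main(String[] args) throws Exception {
        // e.g. the .../-mr-10004/1/5 scratch path from the error above
        Path sourceDir = new Path(args[0]);
        FileSystem fs = sourceDir.getFileSystem(new Configuration());
        // listStatus throws java.io.FileNotFoundException when the directory
        // was never written -- the failure the old description shows.
        for (FileStatus status : fs.listStatus(sourceDir)) {
            System.out.println(status.getPath());
        }
    }
}
{code}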



> Case "multiple sources, single key" in spark_dynamic_pruning.q fails 
> ---------------------------------------------------------------------
>
>                 Key: HIVE-16780
>                 URL: https://issues.apache.org/jira/browse/HIVE-16780
>             Project: Hive
>          Issue Type: Bug
>            Reporter: liyunzhang_intel
>            Assignee: liyunzhang_intel
>
> script.q
> {code}
> set hive.optimize.ppd=true;
> set hive.ppd.remove.duplicatefilters=true;
> set hive.spark.dynamic.partition.pruning=true;
> set hive.optimize.metadataonly=false;
> set hive.optimize.index.filter=true;
> set hive.strict.checks.cartesian.product=false;
> set hive.spark.dynamic.partition.pruning=true;
> -- multiple sources, single key
> select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
> {code}
> If "hive.optimize.index.filter" is disabled, the case passes; otherwise the query always hangs in the first job. Exception:
> {code}
> 17/05/27 23:39:45 DEBUG Executor task launch worker-0 PerfLogger: </PERFLOG method=SparkInitializeOperators start=1495899585574 end=1495899585933 duration=359 from=org.apache.hadoop.hive.ql.exec.spark.SparkRecordHandler>
> 17/05/27 23:39:45 INFO Executor task launch worker-0 Utilities: PLAN PATH = hdfs://bdpe41:8020/tmp/hive/root/029a2d8a-c6e5-4ea9-adea-ef8fbea3cde2/hive_2017-05-27_23-39-06_464_5915518562441677640-1/-mr-10007/617d9dd6-9f9a-4786-8131-a7b98e8abc3e/map.xml
> 17/05/27 23:39:45 DEBUG Executor task launch worker-0 Utilities: Found plan in cache for name: map.xml
> 17/05/27 23:39:45 DEBUG Executor task launch worker-0 DFSClient: Connecting to datanode 10.239.47.162:50010
> 17/05/27 23:39:45 DEBUG Executor task launch worker-0 MapOperator: Processing alias(es) srcpart_hour for file hdfs://bdpe41:8020/user/hive/warehouse/srcpart_hour/000008_0
> 17/05/27 23:39:45 DEBUG Executor task launch worker-0 ObjectCache: Creating root_20170527233906_ac2934e1-2e58-4116-9f0d-35dee302d689_DynamicValueRegistry
> 17/05/27 23:39:45 ERROR Executor task launch worker-0 SparkMapRecordHandler: Error processing row: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row {"hr":"11","hour":"11"}
> org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row {"hr":"11","hour":"11"}
>      at org.apache.hadoop.hive.ql.exec.MapOperator.process(MapOperator.java:562)
>      at org.apache.hadoop.hive.ql.exec.spark.SparkMapRecordHandler.processRow(SparkMapRecordHandler.java:136)
>      at org.apache.hadoop.hive.ql.exec.spark.HiveMapFunctionResultList.processNextRecord(HiveMapFunctionResultList.java:48)
>      at org.apache.hadoop.hive.ql.exec.spark.HiveMapFunctionResultList.processNextRecord(HiveMapFunctionResultList.java:27)
>      at org.apache.hadoop.hive.ql.exec.spark.HiveBaseFunctionResultList.hasNext(HiveBaseFunctionResultList.java:85)
>      at scala.collection.convert.Wrappers$JIteratorWrapper.hasNext(Wrappers.scala:42)
>      at scala.collection.Iterator$class.foreach(Iterator.scala:893)
>      at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
>      at org.apache.spark.rdd.AsyncRDDActions$$anonfun$foreachAsync$1$$anonfun$apply$12.apply(AsyncRDDActions.scala:127)
>      at org.apache.spark.rdd.AsyncRDDActions$$anonfun$foreachAsync$1$$anonfun$apply$12.apply(AsyncRDDActions.scala:127)
>      at org.apache.spark.SparkContext$$anonfun$33.apply(SparkContext.scala:1974)
>      at org.apache.spark.SparkContext$$anonfun$33.apply(SparkContext.scala:1974)
>      at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
>      at org.apache.spark.scheduler.Task.run(Task.scala:85)
>      at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
>      at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>      at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>      at java.lang.Thread.run(Thread.java:745)
> Caused by: java.lang.IllegalStateException: Failed to retrieve dynamic value for RS_7_srcpart__col3_min
>      at org.apache.hadoop.hive.ql.plan.DynamicValue.getValue(DynamicValue.java:126)
>      at org.apache.hadoop.hive.ql.plan.DynamicValue.getWritableValue(DynamicValue.java:101)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeDynamicValueEvaluator._evaluate(ExprNodeDynamicValueEvaluator.java:51)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:80)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator$DeferredExprObject.get(ExprNodeGenericFuncEvaluator.java:88)
>      at org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan.evaluate(GenericUDFOPEqualOrGreaterThan.java:108)
>      at org.apache.hadoop.hive.ql.udf.generic.GenericUDFBetween.evaluate(GenericUDFBetween.java:57)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator._evaluate(ExprNodeGenericFuncEvaluator.java:187)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:80)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator$DeferredExprObject.get(ExprNodeGenericFuncEvaluator.java:88)
>      at org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd.evaluate(GenericUDFOPAnd.java:63)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator._evaluate(ExprNodeGenericFuncEvaluator.java:187)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:80)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator$DeferredExprObject.get(ExprNodeGenericFuncEvaluator.java:88)
>      at org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd.evaluate(GenericUDFOPAnd.java:63)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator._evaluate(ExprNodeGenericFuncEvaluator.java:187)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:80)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluatorHead._evaluate(ExprNodeEvaluatorHead.java:44)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:80)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:68)
>      at org.apache.hadoop.hive.ql.exec.FilterOperator.process(FilterOperator.java:112)
>      at org.apache.hadoop.hive.ql.exec.Operator.forward(Operator.java:897)
>      at org.apache.hadoop.hive.ql.exec.TableScanOperator.process(TableScanOperator.java:130)
>      at org.apache.hadoop.hive.ql.exec.MapOperator$MapOpCtx.forward(MapOperator.java:148)
>      at org.apache.hadoop.hive.ql.exec.MapOperator.process(MapOperator.java:547)
>      ... 17 more
> Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: java.lang.NullPointerException
>      at org.apache.hadoop.hive.ql.exec.mr.ObjectCache.retrieve(ObjectCache.java:62)
>      at org.apache.hadoop.hive.ql.exec.mr.ObjectCache.retrieve(ObjectCache.java:51)
>      at org.apache.hadoop.hive.ql.exec.ObjectCacheWrapper.retrieve(ObjectCacheWrapper.java:40)
>      at org.apache.hadoop.hive.ql.plan.DynamicValue.getValue(DynamicValue.java:119)
>      ... 41 more
> Caused by: java.lang.NullPointerException
>      at org.apache.hadoop.hive.ql.exec.mr.ObjectCache.retrieve(ObjectCache.java:60)
>      ... 44 more
> 17/05/27 23:39:45 ERROR Executor task launch worker-0 Executor: Exception in task 1.0 in stage 0.0 (TID 1)
> java.lang.RuntimeException: Error processing row: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row {"hr":"11","hour":"11"}
>      at org.apache.hadoop.hive.ql.exec.spark.SparkMapRecordHandler.processRow(SparkMapRecordHandler.java:149)
>      at org.apache.hadoop.hive.ql.exec.spark.HiveMapFunctionResultList.processNextRecord(HiveMapFunctionResultList.java:48)
>      at org.apache.hadoop.hive.ql.exec.spark.HiveMapFunctionResultList.processNextRecord(HiveMapFunctionResultList.java:27)
>      at org.apache.hadoop.hive.ql.exec.spark.HiveBaseFunctionResultList.hasNext(HiveBaseFunctionResultList.java:85)
>      at scala.collection.convert.Wrappers$JIteratorWrapper.hasNext(Wrappers.scala:42)
>      at scala.collection.Iterator$class.foreach(Iterator.scala:893)
>      at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
>      at org.apache.spark.rdd.AsyncRDDActions$$anonfun$foreachAsync$1$$anonfun$apply$12.apply(AsyncRDDActions.scala:127)
>      at org.apache.spark.rdd.AsyncRDDActions$$anonfun$foreachAsync$1$$anonfun$apply$12.apply(AsyncRDDActions.scala:127)
>      at org.apache.spark.SparkContext$$anonfun$33.apply(SparkContext.scala:1974)
>      at org.apache.spark.SparkContext$$anonfun$33.apply(SparkContext.scala:1974)
>      at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
>      at org.apache.spark.scheduler.Task.run(Task.scala:85)
>      at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
>      at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>      at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>      at java.lang.Thread.run(Thread.java:745)
> Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row {"hr":"11","hour":"11"}
>      at org.apache.hadoop.hive.ql.exec.MapOperator.process(MapOperator.java:562)
>      at org.apache.hadoop.hive.ql.exec.spark.SparkMapRecordHandler.processRow(SparkMapRecordHandler.java:136)
>      ... 16 more
> Caused by: java.lang.IllegalStateException: Failed to retrieve dynamic value for RS_7_srcpart__col3_min
>      at org.apache.hadoop.hive.ql.plan.DynamicValue.getValue(DynamicValue.java:126)
>      at org.apache.hadoop.hive.ql.plan.DynamicValue.getWritableValue(DynamicValue.java:101)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeDynamicValueEvaluator._evaluate(ExprNodeDynamicValueEvaluator.java:51)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:80)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator$DeferredExprObject.get(ExprNodeGenericFuncEvaluator.java:88)
>      at org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan.evaluate(GenericUDFOPEqualOrGreaterThan.java:108)
>      at org.apache.hadoop.hive.ql.udf.generic.GenericUDFBetween.evaluate(GenericUDFBetween.java:57)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator._evaluate(ExprNodeGenericFuncEvaluator.java:187)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:80)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator$DeferredExprObject.get(ExprNodeGenericFuncEvaluator.java:88)
>      at org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd.evaluate(GenericUDFOPAnd.java:63)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator._evaluate(ExprNodeGenericFuncEvaluator.java:187)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:80)
>      at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator$DeferredExprObject.get(ExprNodeGenericFuncEvaluator.java:88)
>      at org.apache.hadoop.hive
> {code} 



--
This message was sent by Atlassian JIRA
(v6.3.15#6346)
