[ https://issues.apache.org/jira/browse/HIVE-28439?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Denys Kuzmenko updated HIVE-28439:
----------------------------------
    Labels: hive-4.0.1-must pull-request-available  (was: pull-request-available)

> Iceberg: Bucket partition transform with DECIMAL can throw NPE
> --------------------------------------------------------------
>
>                 Key: HIVE-28439
>                 URL: https://issues.apache.org/jira/browse/HIVE-28439
>             Project: Hive
>          Issue Type: Bug
>          Components: Iceberg integration
>    Affects Versions: 4.0.0
>            Reporter: Shohei Okumiya
>            Assignee: Shohei Okumiya
>            Priority: Major
>              Labels: hive-4.0.1-must, pull-request-available
>             Fix For: 4.1.0
>
>
> Hive can fail when we bucket records by decimal columns.
> {code:sql}
> CREATE TABLE test (c_decimal DECIMAL(38, 0)) PARTITIONED BY SPEC (bucket(8, c_decimal)) STORED BY ICEBERG;
> INSERT INTO test VALUES (CAST('50000000000000000000441610525' AS DECIMAL(38, 0)));
> {code}
> Stacktrace
> {code:java}
> ERROR : Vertex failed, vertexName=Map 1, vertexId=vertex_1722775255811_0004_1_00, diagnostics=[Task failed, taskId=task_1722775255811_0004_1_00_000000, diagnostics=[TaskAttempt 0 failed, info=[Error: Node: yarn-nodemanager-2.yarn-nodemanager.zookage.svc.cluster.local/10.1.5.93 : Error while running task ( failure ) : attempt_1722775255811_0004_1_00_000000_0:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing writable
>     at org.apache.hadoop.hive.ql.exec.tez.TezProcessor.initializeAndRunProcessor(TezProcessor.java:348)
>     at org.apache.hadoop.hive.ql.exec.tez.TezProcessor.run(TezProcessor.java:276)
>     at org.apache.tez.runtime.LogicalIOProcessorRuntimeTask.run(LogicalIOProcessorRuntimeTask.java:381)
>     at org.apache.tez.runtime.task.TaskRunner2Callable$1.run(TaskRunner2Callable.java:82)
>     at org.apache.tez.runtime.task.TaskRunner2Callable$1.run(TaskRunner2Callable.java:69)
>     at java.security.AccessController.doPrivileged(Native Method)
>     at javax.security.auth.Subject.doAs(Subject.java:422)
>     at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1899)
>     at org.apache.tez.runtime.task.TaskRunner2Callable.callInternal(TaskRunner2Callable.java:69)
>     at org.apache.tez.runtime.task.TaskRunner2Callable.callInternal(TaskRunner2Callable.java:39)
>     at org.apache.tez.common.CallableWithNdc.call(CallableWithNdc.java:36)
>     at com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:131)
>     at com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:75)
>     at com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:82)
>     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>     at java.lang.Thread.run(Thread.java:748)
> Caused by: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing writable
>     at org.apache.hadoop.hive.ql.exec.tez.MapRecordSource.processRow(MapRecordSource.java:110)
>     at org.apache.hadoop.hive.ql.exec.tez.MapRecordSource.pushRecord(MapRecordSource.java:83)
>     at org.apache.hadoop.hive.ql.exec.tez.MapRecordProcessor.run(MapRecordProcessor.java:414)
>     at org.apache.hadoop.hive.ql.exec.tez.TezProcessor.initializeAndRunProcessor(TezProcessor.java:293)
>     ... 16 more
> Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing writable
>     at org.apache.hadoop.hive.ql.exec.MapOperator.process(MapOperator.java:569)
>     at org.apache.hadoop.hive.ql.exec.tez.MapRecordSource.processRow(MapRecordSource.java:101)
>     ... 19 more
> Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: java.lang.NullPointerException
>     at org.apache.hadoop.hive.ql.exec.ReduceSinkOperator.process(ReduceSinkOperator.java:384)
>     at org.apache.hadoop.hive.ql.exec.Operator.forward(Operator.java:888)
>     at org.apache.hadoop.hive.ql.exec.SelectOperator.process(SelectOperator.java:94)
>     at org.apache.hadoop.hive.ql.exec.Operator.forward(Operator.java:888)
>     at org.apache.hadoop.hive.ql.exec.UDTFOperator.forwardUDTFOutput(UDTFOperator.java:133)
>     at org.apache.hadoop.hive.ql.udf.generic.UDTFCollector.collect(UDTFCollector.java:45)
>     at org.apache.hadoop.hive.ql.udf.generic.GenericUDTF.forward(GenericUDTF.java:110)
>     at org.apache.hadoop.hive.ql.udf.generic.GenericUDTFInline.process(GenericUDTFInline.java:64)
>     at org.apache.hadoop.hive.ql.exec.UDTFOperator.process(UDTFOperator.java:116)
>     at org.apache.hadoop.hive.ql.exec.Operator.forward(Operator.java:888)
>     at org.apache.hadoop.hive.ql.exec.SelectOperator.process(SelectOperator.java:94)
>     at org.apache.hadoop.hive.ql.exec.Operator.forward(Operator.java:888)
>     at org.apache.hadoop.hive.ql.exec.TableScanOperator.process(TableScanOperator.java:173)
>     at org.apache.hadoop.hive.ql.exec.MapOperator$MapOpCtx.forward(MapOperator.java:155)
>     at org.apache.hadoop.hive.ql.exec.MapOperator.process(MapOperator.java:555)
>     ... 20 more
> Caused by: java.lang.NullPointerException
>     at org.apache.iceberg.mr.hive.udf.GenericUDFIcebergBucket.lambda$initialize$4(GenericUDFIcebergBucket.java:137)
>     at org.apache.iceberg.mr.hive.udf.GenericUDFIcebergBucket.evaluate(GenericUDFIcebergBucket.java:201)
>     at org.apache.hadoop.hive.ql.exec.ExprNodeGenericFuncEvaluator._evaluate(ExprNodeGenericFuncEvaluator.java:235)
>     at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:80)
>     at org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator.evaluate(ExprNodeEvaluator.java:68)
>     at org.apache.hadoop.hive.ql.exec.ReduceSinkOperator.populateCachedDistributionKeys(ReduceSinkOperator.java:400)
>     at org.apache.hadoop.hive.ql.exec.ReduceSinkOperator.process(ReduceSinkOperator.java:318)
>     ... 34 more
> {code}

--
This message was sent by Atlassian Jira
(v8.20.10#820010)
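For context on what the failing UDF is expected to produce: the Iceberg table spec defines bucket(N, decimal) as a 32-bit Murmur3 hash of the minimal two's-complement big-endian bytes of the decimal's unscaled value, taken modulo N. The sketch below computes that value for the DECIMAL(38, 0) literal from the reproduction; it is a standalone illustration only (the DecimalBucketSketch class and bucketForDecimal helper are not Hive or Iceberg code, and it assumes Guava 31+ on the classpath for Murmur3).

{code:java}
import com.google.common.hash.Hashing;

import java.math.BigDecimal;

public class DecimalBucketSketch {

  // Illustrative helper (not Hive/Iceberg code): per the Iceberg spec,
  // bucket(N, decimal) hashes the minimal two's-complement big-endian bytes
  // of the unscaled value with Murmur3 (x86, 32-bit) and returns
  // (hash & Integer.MAX_VALUE) % N.
  static int bucketForDecimal(BigDecimal value, int numBuckets) {
    byte[] unscaledBytes = value.unscaledValue().toByteArray();
    int hash = Hashing.murmur3_32_fixed().hashBytes(unscaledBytes).asInt();
    return (hash & Integer.MAX_VALUE) % numBuckets;
  }

  public static void main(String[] args) {
    // The DECIMAL(38, 0) value inserted in the reproduction, bucketed with bucket(8, c_decimal).
    BigDecimal value = new BigDecimal("50000000000000000000441610525");
    System.out.println(bucketForDecimal(value, 8)); // prints a bucket id in [0, 8)
  }
}
{code}

Since this computation is well defined for any non-null decimal, the NullPointerException above appears to come from the Hive-side evaluation path in GenericUDFIcebergBucket rather than from the bucket transform's definition itself.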