[ https://issues.apache.org/jira/browse/HIVE-10828?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14560290#comment-14560290 ]

Eugene Koifman commented on HIVE-10828:
---------------------------------------

Simpler repro case:
{noformat}
set hive.enforce.bucketing=true;
set hive.exec.dynamic.partition.mode=nonstrict;
set hive.cbo.enable=false;

drop table if exists acid_partitioned;
create table acid_partitioned (a int, c string)
  partitioned by (p int)
  clustered by (a) into 1 buckets;

insert into acid_partitioned partition (p) (a,p) values(1,1);
{noformat}
The example above disables CBO because it causes additional issues; I will file a separate ticket for that.
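For contrast, a minimal sketch of the same insert written without a target column list, supplying a value for every column (NULL for the omitted column c, and the dynamic partition column p last). This is an assumed workaround, not verified against this build; it simply avoids the reduced column list that the failing path consumes:
{noformat}
-- Assumed workaround sketch (not verified): no insert column list, so a value
-- is supplied for every column of acid_partitioned, with NULL standing in for
-- the omitted column c and the dynamic partition value p coming last.
insert into acid_partitioned partition (p) values (1, null, 1);
{noformat}
If this full-column form succeeds while the (a,p) form fails, that further narrows the problem to the schema-on-insert column handling seen in genReduceSinkPlan/genBucketingSortingDest in the stack trace below.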

> Insert...values for fewer number of columns fail
> ------------------------------------------------
>
>                 Key: HIVE-10828
>                 URL: https://issues.apache.org/jira/browse/HIVE-10828
>             Project: Hive
>          Issue Type: Bug
>          Components: Hive
>    Affects Versions: 1.2.0
>            Reporter: Aswathy Chellammal Sreekumar
>            Assignee: Eugene Koifman
>
> Schema-on-insert queries that specify fewer columns than the target table fail with the error message below:
> ERROR ql.Driver (SessionState.java:printError(957)) - FAILED: NullPointerException null
> java.lang.NullPointerException
>   at org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.genReduceSinkPlan(SemanticAnalyzer.java:7277)
>   at org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.genBucketingSortingDest(SemanticAnalyzer.java:6120)
>   at org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.genFileSinkPlan(SemanticAnalyzer.java:6291)
>   at org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.genPostGroupByBodyPlan(SemanticAnalyzer.java:8992)
>   at org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.genBodyPlan(SemanticAnalyzer.java:8883)
>   at org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.genPlan(SemanticAnalyzer.java:9728)
>   at org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.genPlan(SemanticAnalyzer.java:9621)
>   at org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.genOPTree(SemanticAnalyzer.java:10094)
>   at org.apache.hadoop.hive.ql.parse.CalcitePlanner.genOPTree(CalcitePlanner.java:324)
>   at org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.analyzeInternal(SemanticAnalyzer.java:10105)
>   at org.apache.hadoop.hive.ql.parse.CalcitePlanner.analyzeInternal(CalcitePlanner.java:208)
>   at org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.analyze(BaseSemanticAnalyzer.java:227)
>   at org.apache.hadoop.hive.ql.Driver.compile(Driver.java:424)
>   at org.apache.hadoop.hive.ql.Driver.compile(Driver.java:308)
>   at org.apache.hadoop.hive.ql.Driver.compileInternal(Driver.java:1122)
>   at org.apache.hadoop.hive.ql.Driver.runInternal(Driver.java:1170)
>   at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1059)
>   at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1049)
>   at org.apache.hadoop.hive.cli.CliDriver.processLocalCmd(CliDriver.java:213)
>   at org.apache.hadoop.hive.cli.CliDriver.processCmd(CliDriver.java:165)
>   at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:376)
>   at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:311)
>   at org.apache.hadoop.hive.cli.CliDriver.processReader(CliDriver.java:409)
>   at org.apache.hadoop.hive.cli.CliDriver.processFile(CliDriver.java:425)
>   at org.apache.hadoop.hive.cli.CliDriver.executeDriver(CliDriver.java:714)
>   at org.apache.hadoop.hive.cli.CliDriver.run(CliDriver.java:681)
>   at org.apache.hadoop.hive.cli.CliDriver.main(CliDriver.java:621)
>   at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>   at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
>   at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>   at java.lang.reflect.Method.invoke(Method.java:606)
>   at org.apache.hadoop.util.RunJar.run(RunJar.java:221)
>   at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
>
> *Steps to reproduce:*
> set hive.support.concurrency=true;
> set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
> set hive.enforce.bucketing=true;
>
> drop table if exists table1;
> create table table1 (a int, b string, c string)
>   partitioned by (bkt int)
>   clustered by (a) into 2 buckets
>   stored as orc
>   tblproperties ('transactional'='true');
>
> insert into table1 partition (bkt) (b, a, bkt) values
>   ('part one', 1, 1), ('part one', 2, 1), ('part two', 3, 2), ('part three', 4, 3);



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)