GitHub user rja1 commented on the issue:

    https://github.com/apache/zeppelin/pull/1157
  
    Thanks for the tip @prabhjyotsingh, and for your work; Hive is working!
    
    I'm running into an issue with the jdbc (Phoenix) interpreter now, though, and I'm hoping you can help. My interpreter config is listed below. Note that we don't have a /hbase-secure dir in ZooKeeper, just /hbase:
    [zk: localhost:2181(CONNECTED) 1] ls /hbase
    [replication, meta-region-server, rs, splitWAL, backup-masters, table-lock, flush-table-proc, region-in-transition, online-snapshot, acl, master, running, balancer, tokenauth, recovering-regions, draining, namespace, hbaseid, table]
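
    For what it's worth, my understanding is that the segments of the Phoenix thick-client URL after jdbc:phoenix: are positional (ZooKeeper quorum, then client port, then root znode, then optionally a Kerberos principal and keytab), so a secured connection string would follow the template below; treat this as my assumption rather than verified syntax:
    
    jdbc:phoenix:<zk-quorum>:<zk-port>:<root-znode>[:<principal>:<keytab-file>]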
    
    Here's the notebook query and the resulting error:
    select * from USER_ACCOUNTS where USER_SEED = '1000'
    Failed after attempts=1, exceptions:
    Mon Jul 18 14:00:22 MDT 2016, RpcRetryingCaller{globalStartTime=1468872022128, pause=100, retries=1}, org.apache.hadoop.hbase.MasterNotRunningException: com.google.protobuf.ServiceException: org.apache.hadoop.hbase.exceptions.ConnectionClosingException: Call to name01.hadoop.test.company.com/10.4.59.25:60000 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosingException: Connection to name01.hadoop.test.company.com/10.4.59.25:60000 is closing. Call id=0, waitTime=11
    class org.apache.phoenix.exception.PhoenixIOException
    org.apache.phoenix.util.ServerUtil.parseServerException(ServerUtil.java:111)
    org.apache.phoenix.query.ConnectionQueryServicesImpl.ensureTableCreated(ConnectionQueryServicesImpl.java:1064)
    org.apache.phoenix.query.ConnectionQueryServicesImpl.createTable(ConnectionQueryServicesImpl.java:1370)
    org.apache.phoenix.schema.MetaDataClient.createTableInternal(MetaDataClient.java:2116)
    org.apache.phoenix.schema.MetaDataClient.createTable(MetaDataClient.java:828)
    org.apache.phoenix.compile.CreateTableCompiler$2.execute(CreateTableCompiler.java:183)
    org.apache.phoenix.jdbc.PhoenixStatement$2.call(PhoenixStatement.java:338)
    org.apache.phoenix.jdbc.PhoenixStatement$2.call(PhoenixStatement.java:326)
    org.apache.phoenix.call.CallRunner.run(CallRunner.java:53)
    org.apache.phoenix.jdbc.PhoenixStatement.executeMutation(PhoenixStatement.java:324)
    org.apache.phoenix.jdbc.PhoenixStatement.executeUpdate(PhoenixStatement.java:1326)
    org.apache.phoenix.query.ConnectionQueryServicesImpl$13.call(ConnectionQueryServicesImpl.java:2275)
    org.apache.phoenix.query.ConnectionQueryServicesImpl$13.call(ConnectionQueryServicesImpl.java:2244)
    org.apache.phoenix.util.PhoenixContextExecutor.call(PhoenixContextExecutor.java:78)
    org.apache.phoenix.query.ConnectionQueryServicesImpl.init(ConnectionQueryServicesImpl.java:2244)
    org.apache.phoenix.jdbc.PhoenixDriver.getConnectionQueryServices(PhoenixDriver.java:233)
    org.apache.phoenix.jdbc.PhoenixEmbeddedDriver.createConnection(PhoenixEmbeddedDriver.java:135)
    org.apache.phoenix.jdbc.PhoenixDriver.connect(PhoenixDriver.java:202)
    java.sql.DriverManager.getConnection(DriverManager.java:664)
    java.sql.DriverManager.getConnection(DriverManager.java:208)
    org.apache.zeppelin.jdbc.JDBCInterpreter.getConnection(JDBCInterpreter.java:226)
    org.apache.zeppelin.jdbc.JDBCInterpreter.getStatement(JDBCInterpreter.java:237)
    org.apache.zeppelin.jdbc.JDBCInterpreter.executeSql(JDBCInterpreter.java:296)
    org.apache.zeppelin.jdbc.JDBCInterpreter.interpret(JDBCInterpreter.java:402)
    org.apache.zeppelin.interpreter.LazyOpenInterpreter.interpret(LazyOpenInterpreter.java:94)
    org.apache.zeppelin.interpreter.remote.RemoteInterpreterServer$InterpretJob.jobRun(RemoteInterpreterServer.java:383)
    org.apache.zeppelin.scheduler.Job.run(Job.java:176)
    org.apache.zeppelin.scheduler.ParallelScheduler$JobRunner.run(ParallelScheduler.java:162)
    java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
    java.util.concurrent.FutureTask.run(FutureTask.java:266)
    java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$201(ScheduledThreadPoolExecutor.java:180)
    java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:293)
    java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    java.lang.Thread.run(Thread.java:745)
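
    To take Zeppelin out of the equation I can run a standalone check like the sketch below. It assumes the phoenix-4.7.0-cdh5.5.1-client.jar is on the classpath, a valid Kerberos ticket cache (kinit), and the USER_ACCOUNTS table from the paragraph above; the URL mirrors my phoenix.url with an assumed ZK port of 2181.
    
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;
    
    // Minimal Phoenix connectivity check outside Zeppelin (a sketch, not a
    // verified reproduction of the interpreter's code path).
    public class PhoenixSmokeTest {
        public static void main(String[] args) throws Exception {
            // Same quorum/znode as the interpreter's phoenix.url; port 2181 assumed.
            String url = "jdbc:phoenix:cms01.hadoop.test.company.com:2181:/hbase";
            try (Connection conn = DriverManager.getConnection(url);
                 Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery(
                         "select * from USER_ACCOUNTS where USER_SEED = '1000'")) {
                while (rs.next()) {
                    // Print the first column of each row to prove the round trip.
                    System.out.println(rs.getString(1));
                }
            }
        }
    }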
    
    Interpreter Config:
        "2BRGRRCBW": {
          "id": "2BRGRRCBW",
          "name": "jdbc",
          "group": "jdbc",
          "properties": {
            "phoenix.user": "zeppelin",
            "hive.url": 
"jdbc:hive2://cms01.hadoop.test.company.com:10000/default;principal\u003dhive/_h...@hadoop.test.company.com",
            "default.driver": "org.postgresql.Driver",
            "phoenix.driver": "org.apache.phoenix.jdbc.PhoenixDriver",
            "hive.user": "hive",
            "psql.password": "",
            "psql.user": "phoenixuser",
            "psql.url": "jdbc:postgresql://localhost:5432/",
            "default.user": "gpadmin",
            "phoenix.hbase.client.retries.number": "1",
            "phoenix.url": "jdbc:phoenix:cms01.hadoop.test.company.com:/hbase",
            "tajo.url": "jdbc:tajo://localhost:26002/default",
            "tajo.driver": "org.apache.tajo.jdbc.TajoDriver",
            "psql.driver": "org.postgresql.Driver",
            "default.password": "",
            "zeppelin.interpreter.localRepo": 
"/opt/zeppelin-ZEPPELIN-1146/local-repo/2BRGRRCBW",
            "zeppelin.jdbc.auth.type": "KERBEROS",
            "zeppelin.jdbc.concurrent.use": "true",
            "hive.password": "",
            "hive.driver": "org.apache.hive.jdbc.HiveDriver",
            "zeppelin.jdbc.keytab.location": 
"/opt/zeppelin-ZEPPELIN-1146/conf/zeppelin.keytab",
            "common.max_count": "1000",
            "phoenix.password": "",
            "zeppelin.jdbc.principal": "zeppe...@hadoop.test.company.com",
            "zeppelin.jdbc.concurrent.max_connection": "10",
            "default.url": "jdbc:postgresql://localhost:5432/"
          },
          "interpreterGroup": [
            {
              "name": "sql",
              "class": "org.apache.zeppelin.jdbc.JDBCInterpreter"
            }
          ],
          "dependencies": [
            {
              "groupArtifactVersion": 
"/opt/cloudera/CDH/jars/hive-jdbc-1.1.0-cdh5.5.2.jar",
              "local": false
            },
            {
              "groupArtifactVersion": 
"/opt/cloudera/CDH/jars/hadoop-common-2.6.0-cdh5.5.2.jar",
              "local": false
            },
            {
              "groupArtifactVersion": 
"/opt/cloudera/CDH/jars/hive-shims-0.23-1.1.0-cdh5.5.2.jar",
              "local": false,
              "exclusions": []
            },
            {
              "groupArtifactVersion": 
"/opt/cloudera/CDH/jars/hive-jdbc-1.1.0-cdh5.5.2-standalone.jar",
              "local": false
            },
            {
              "groupArtifactVersion": 
"/opt/cloudera/CDH/jars/phoenix-core-4.7.0-cdh5.5.1.jar",
              "local": false
            },
            {
              "groupArtifactVersion": 
"/opt/cloudera/CDH/jars/hbase-common-1.0.0-cdh5.5.2.jar",
              "local": false
            },
            {
              "groupArtifactVersion": 
"/opt/cloudera/CDH/jars/hbase-client-1.0.0-cdh5.5.2.jar",
              "local": false
            },
            {
              "groupArtifactVersion": 
"/opt/cloudera/CDH/jars/phoenix-4.7.0-cdh5.5.1-client.jar",
              "local": false
            }
          ],
          "option": {
            "remote": true,
            "port": -1,
            "perNoteSession": false,
            "perNoteProcess": false,
            "isExistingProcess": false
          }
        },
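
    If the URL turns out to be the culprit, my guess (hypothetical, just reusing the keytab and principal already configured above) is that phoenix.url needs the ZK port plus the principal and keytab segments, e.g.:
    
        "phoenix.url": "jdbc:phoenix:cms01.hadoop.test.company.com:2181:/hbase:zeppe...@hadoop.test.company.com:/opt/zeppelin-ZEPPELIN-1146/conf/zeppelin.keytab",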

