It seems that this is only a WARNING. However, I noticed that even for
successful cases (where the cube was built successfully), I can see the
following WARNINGs and ERRORs:




2016-09-27 00:15:55,072 WARN [main] org.apache.hadoop.ipc.Client: Exception
encountered while connecting to the server :
org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.ipc.StandbyException):
Operation category READ is not supported in state standby
at
org.apache.hadoop.security.SaslRpcClient.saslConnect(SaslRpcClient.java:375)
at
org.apache.hadoop.ipc.Client$Connection.setupSaslConnection(Client.java:563)
at org.apache.hadoop.ipc.Client$Connection.access$1900(Client.java:378)
at org.apache.hadoop.ipc.Client$Connection$2.run(Client.java:732)
at org.apache.hadoop.ipc.Client$Connection$2.run(Client.java:728)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at
org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1709)
at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:727)
at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
at org.apache.hadoop.ipc.Client.getConnection(Client.java:1492)
at org.apache.hadoop.ipc.Client.call(Client.java:1402)
at org.apache.hadoop.ipc.Client.call(Client.java:1363)
at
org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:229)
at com.sun.proxy.$Proxy10.getFileInfo(Unknown Source)
at
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getFileInfo(ClientNamenodeProtocolTranslatorPB.java:773)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:497)
at
org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:256)
at
org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:104)
at com.sun.proxy.$Proxy11.getFileInfo(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient.getFileInfo(DFSClient.java:2162)
at
org.apache.hadoop.hdfs.DistributedFileSystem$24.doCall(DistributedFileSystem.java:1363)
at
org.apache.hadoop.hdfs.DistributedFileSystem$24.doCall(DistributedFileSystem.java:1359)
at
org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
at
org.apache.hadoop.hdfs.DistributedFileSystem.getFileStatus(DistributedFileSystem.java:1359)
at org.apache.hadoop.fs.FileSystem.exists(FileSystem.java:1424)
at
org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.mkdir(JobHistoryEventHandler.java:269)
at
org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.serviceInit(JobHistoryEventHandler.java:168)
at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
at
org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
at
org.apache.hadoop.mapreduce.v2.app.MRAppMaster.serviceInit(MRAppMaster.java:459)
at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
at
org.apache.hadoop.mapreduce.v2.app.MRAppMaster$5.run(MRAppMaster.java:1556)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at
org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1709)
at
org.apache.hadoop.mapreduce.v2.app.MRAppMaster.initAndStartAppMaster(MRAppMaster.java:1553)
at
org.apache.hadoop.mapreduce.v2.app.MRAppMaster.main(MRAppMaster.java:1486)
2016-09-27 00:15:55,084 INFO [main]
org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler: Emitting job
history data to the timeline server is not enabled
2016-09-27 00:15:55,138 INFO [main]
org.apache.hadoop.yarn.event.AsyncDispatcher: Registering class
org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent$Type for class
org.apache.hadoop.mapreduce.v2.app.MRAppMaster$JobFinishEventHandler
2016-09-27 00:15:55,219 INFO [main]
org.apache.hadoop.metrics2.impl.MetricsConfig: loaded properties from
hadoop-metrics2.properties
2016-09-27 00:15:55,272 INFO [main]
org.apache.hadoop.metrics2.impl.MetricsSystemImpl: Scheduled snapshot
period at 10 second(s).
2016-09-27 00:15:55,272 INFO [main]
org.apache.hadoop.metrics2.impl.MetricsSystemImpl: MRAppMaster metrics
system started
2016-09-27 00:15:55,279 INFO [main]
org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl: Adding job token for
job_1472454550517_40237 to jobTokenSecretManager
2016-09-27 00:15:55,379 INFO [main]
org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl: Not uberizing
job_1472454550517_40237 because: not enabled; too many reduces;
2016-09-27 00:15:55,391 INFO [main]
org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl: Input size for job
job_1472454550517_40237 = 129. Number of splits = 1
2016-09-27 00:15:55,393 INFO [main]
org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl: Number of reduces for
job job_1472454550517_40237 = 10
2016-09-27 00:15:55,393 INFO [main]
org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl:
job_1472454550517_40237Job Transitioned from NEW to INITED
2016-09-27 00:15:55,394 INFO [main]
org.apache.hadoop.mapreduce.v2.app.MRAppMaster: MRAppMaster launching
normal, non-uberized, multi-container job job_1472454550517_40237.
2016-09-27 00:15:55,420 INFO [main] org.apache.hadoop.ipc.CallQueueManager:
Using callQueue class java.util.concurrent.LinkedBlockingQueue
2016-09-27 00:15:55,425 INFO [Socket Reader #1 for port 33743]
org.apache.hadoop.ipc.Server: Starting Socket Reader #1 for port 33743
2016-09-27 00:15:55,437 INFO [main]
org.apache.hadoop.yarn.factories.impl.pb.RpcServerFactoryPBImpl: Adding
protocol org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB to the server
2016-09-27 00:15:55,463 INFO [IPC Server Responder]
org.apache.hadoop.ipc.Server: IPC Server Responder: starting
2016-09-27 00:15:55,463 INFO [IPC Server listener on 33743]
org.apache.hadoop.ipc.Server: IPC Server listener on 33743: starting
2016-09-27 00:15:55,463 INFO [main]
org.apache.hadoop.mapreduce.v2.app.client.MRClientService: Instantiated
MRClientService at hdp-dn5029.hadoop.local/100.78.9.20:33743
2016-09-27 00:15:55,524 INFO [main] org.mortbay.log: Logging to
org.slf4j.impl.Log4jLoggerAdapter(org.mortbay.log) via
org.mortbay.log.Slf4jLog
2016-09-27 00:15:55,530 INFO [main]
org.apache.hadoop.security.authentication.server.AuthenticationFilter:
Unable to initialize FileSignerSecretProvider, falling back to use random
secrets.
2016-09-27 00:15:55,533 WARN [main] org.apache.hadoop.http.HttpRequestLog:
Jetty request log can only be enabled using Log4j
2016-09-27 00:15:55,538 INFO [main] org.apache.hadoop.http.HttpServer2:
Added global filter 'safety'
(class=org.apache.hadoop.http.HttpServer2$QuotingInputFilter)
2016-09-27 00:15:55,565 INFO [main] org.apache.hadoop.http.HttpServer2:
Added filter AM_PROXY_FILTER
(class=org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter) to
context mapreduce
2016-09-27 00:15:55,565 INFO [main] org.apache.hadoop.http.HttpServer2:
Added filter AM_PROXY_FILTER
(class=org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter) to
context static
2016-09-27 00:15:55,567 INFO [main] org.apache.hadoop.http.HttpServer2:
adding path spec: /mapreduce/*
2016-09-27 00:15:55,567 INFO [main] org.apache.hadoop.http.HttpServer2:
adding path spec: /ws/*
2016-09-27 00:15:55,781 INFO [main] org.apache.hadoop.yarn.webapp.WebApps:
Registered webapp guice modules
2016-09-27 00:15:55,782 INFO [main] org.apache.hadoop.http.HttpServer2:
Jetty bound to port 57350
2016-09-27 00:15:55,782 INFO [main] org.mortbay.log: jetty-6.1.26.hwx
2016-09-27 00:15:55,802 INFO [main] org.mortbay.log: Extract
jar:file:/usr/hdp/2.4.2.0-258/hadoop-2.7.1.2.4.2.0-258/share/hadoop/yarn/hadoop-yarn-common-2.7.1.2.4.2.0-258.jar!/webapps/mapreduce
to
/var/hadoop/vol10/yarn/nm/usercache/kylin/appcache/application_1472454550517_40237/container_1472454550517_40237_01_000001/tmp/Jetty_0_0_0_0_57350_mapreduce____2u9tzh/webapp
2016-09-27 00:15:56,596 INFO [main] org.mortbay.log: Started HttpServer2$
[email protected]:57350
2016-09-27 00:15:56,596 INFO [main] org.apache.hadoop.yarn.webapp.WebApps:
Web app mapreduce started at 57350
2016-09-27 00:15:56,599 INFO [main] org.apache.hadoop.ipc.CallQueueManager:
Using callQueue class java.util.concurrent.LinkedBlockingQueue
2016-09-27 00:15:56,600 INFO [Socket Reader #1 for port 33397]
org.apache.hadoop.ipc.Server: Starting Socket Reader #1 for port 33397
2016-09-27 00:15:56,622 INFO [IPC Server Responder]
org.apache.hadoop.ipc.Server: IPC Server Responder: starting
2016-09-27 00:15:56,622 INFO [IPC Server listener on 33397]
org.apache.hadoop.ipc.Server: IPC Server listener on 33397: starting
2016-09-27 00:15:56,636 INFO [main]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor:
nodeBlacklistingEnabled:true
2016-09-27 00:15:56,636 INFO [main]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor:
maxTaskFailuresPerNode is 3
2016-09-27 00:15:56,636 INFO [main]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor:
blacklistDisablePercent is 33
2016-09-27 00:15:56,690 INFO [main]
org.apache.hadoop.yarn.client.ConfiguredRMFailoverProxyProvider: Failing
over to rm2
2016-09-27 00:15:56,719 INFO [main]
org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator:
maxContainerCapability: <memory:204800, vCores:32>
2016-09-27 00:15:56,720 INFO [main]
org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator: queue: default
2016-09-27 00:15:56,724 INFO [main]
org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherImpl: Upper
limit on the thread pool size is 500
2016-09-27 00:15:56,724 INFO [main]
org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherImpl: The
thread pool initial size is 10
2016-09-27 00:15:56,727 INFO [main]
org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy:
yarn.client.max-cached-nodemanagers-proxies : 0
2016-09-27 00:15:56,733 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl:
job_1472454550517_40237Job Transitioned from INITED to SETUP
2016-09-27 00:15:56,735 INFO [CommitterEvent Processor #0]
org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler: Processing
the event EventType: JOB_SETUP
2016-09-27 00:15:56,745 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl:
job_1472454550517_40237Job Transitioned from SETUP to RUNNING
2016-09-27 00:15:56,765 INFO [AsyncDispatcher event handler]
org.apache.hadoop.yarn.util.RackResolver: Resolved hdp-dn5006.hadoop.local
to /bk01
2016-09-27 00:15:56,771 INFO [AsyncDispatcher event handler]
org.apache.hadoop.yarn.util.RackResolver: Resolved hdp-dn5031.hadoop.local
to /bk05
2016-09-27 00:15:56,773 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskImpl:
task_1472454550517_40237_m_000000 Task Transitioned from NEW to SCHEDULED
2016-09-27 00:15:56,773 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskImpl:
task_1472454550517_40237_r_000000 Task Transitioned from NEW to SCHEDULED
2016-09-27 00:15:56,774 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskImpl:
task_1472454550517_40237_r_000001 Task Transitioned from NEW to SCHEDULED
2016-09-27 00:15:56,774 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskImpl:
task_1472454550517_40237_r_000002 Task Transitioned from NEW to SCHEDULED
2016-09-27 00:15:56,774 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskImpl:
task_1472454550517_40237_r_000003 Task Transitioned from NEW to SCHEDULED
2016-09-27 00:15:56,774 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskImpl:
task_1472454550517_40237_r_000004 Task Transitioned from NEW to SCHEDULED
2016-09-27 00:15:56,775 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskImpl:
task_1472454550517_40237_r_000005 Task Transitioned from NEW to SCHEDULED
2016-09-27 00:15:56,775 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskImpl:
task_1472454550517_40237_r_000006 Task Transitioned from NEW to SCHEDULED
2016-09-27 00:15:56,775 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskImpl:
task_1472454550517_40237_r_000007 Task Transitioned from NEW to SCHEDULED
2016-09-27 00:15:56,775 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskImpl:
task_1472454550517_40237_r_000008 Task Transitioned from NEW to SCHEDULED
2016-09-27 00:15:56,775 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskImpl:
task_1472454550517_40237_r_000009 Task Transitioned from NEW to SCHEDULED
2016-09-27 00:15:56,776 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl:
attempt_1472454550517_40237_m_000000_0 TaskAttempt Transitioned from NEW to
UNASSIGNED
2016-09-27 00:15:56,777 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl:
attempt_1472454550517_40237_r_000000_0 TaskAttempt Transitioned from NEW to
UNASSIGNED
2016-09-27 00:15:56,777 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl:
attempt_1472454550517_40237_r_000001_0 TaskAttempt Transitioned from NEW to
UNASSIGNED
2016-09-27 00:15:56,777 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl:
attempt_1472454550517_40237_r_000002_0 TaskAttempt Transitioned from NEW to
UNASSIGNED
2016-09-27 00:15:56,777 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl:
attempt_1472454550517_40237_r_000003_0 TaskAttempt Transitioned from NEW to
UNASSIGNED
2016-09-27 00:15:56,777 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl:
attempt_1472454550517_40237_r_000004_0 TaskAttempt Transitioned from NEW to
UNASSIGNED
2016-09-27 00:15:56,777 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl:
attempt_1472454550517_40237_r_000005_0 TaskAttempt Transitioned from NEW to
UNASSIGNED
2016-09-27 00:15:56,777 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl:
attempt_1472454550517_40237_r_000006_0 TaskAttempt Transitioned from NEW to
UNASSIGNED
2016-09-27 00:15:56,777 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl:
attempt_1472454550517_40237_r_000007_0 TaskAttempt Transitioned from NEW to
UNASSIGNED
2016-09-27 00:15:56,777 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl:
attempt_1472454550517_40237_r_000008_0 TaskAttempt Transitioned from NEW to
UNASSIGNED
2016-09-27 00:15:56,778 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl:
attempt_1472454550517_40237_r_000009_0 TaskAttempt Transitioned from NEW to
UNASSIGNED
2016-09-27 00:15:56,778 INFO [Thread-55]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator:
mapResourceRequest:<memory:4736, vCores:1>
2016-09-27 00:15:56,785 INFO [Thread-55]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator:
reduceResourceRequest:<memory:9472, vCores:1>
2016-09-27 00:15:56,814 INFO [eventHandlingThread]
org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler: Event Writer
setup for JobId: job_1472454550517_40237, File:
hdfs://nameservice1:8020/tmp/hadoop-yarn/staging/kylin/.staging/job_1472454550517_40237/job_1472454550517_40237_1.jhist
2016-09-27 00:15:57,723 INFO [RMCommunicator Allocator]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator: Before
Scheduling: PendingReds:10 ScheduledMaps:1 ScheduledReds:0 AssignedMaps:0
AssignedReds:0 CompletedMaps:0 CompletedReds:0 ContAlloc:0 ContRel:0
HostLocal:0 RackLocal:0
2016-09-27 00:15:57,776 INFO [RMCommunicator Allocator]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor: getResources()
for application_1472454550517_40237: ask=5 release= 0 newContainers=0
finishedContainers=0 resourcelimit=<memory:9472, vCores:1> knownNMs=40
2016-09-27 00:15:57,777 INFO [RMCommunicator Allocator]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator: Recalculating
schedule, headroom=<memory:9472, vCores:1>
2016-09-27 00:15:57,778 INFO [RMCommunicator Allocator]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator: Reduce slow
start threshold not met. completedMapsForReduceSlowstart 1
2016-09-27 00:15:58,792 INFO [RMCommunicator Allocator]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator: Got allocated
containers 1
2016-09-27 00:15:58,800 INFO [RMCommunicator Allocator]
org.apache.hadoop.yarn.util.RackResolver: Resolved hdp-dn5026.hadoop.local
to /bk04
2016-09-27 00:15:58,803 INFO [RMCommunicator Allocator]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator: Assigned
container container_1472454550517_40237_01_000002 to
attempt_1472454550517_40237_m_000000_0
2016-09-27 00:15:58,806 INFO [RMCommunicator Allocator]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator: Recalculating
schedule, headroom=<memory:4736, vCores:1>
2016-09-27 00:15:58,806 INFO [RMCommunicator Allocator]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator: Reduce slow
start threshold not met. completedMapsForReduceSlowstart 1
2016-09-27 00:15:58,806 INFO [RMCommunicator Allocator]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator: After
Scheduling: PendingReds:10 ScheduledMaps:0 ScheduledReds:0 AssignedMaps:1
AssignedReds:0 CompletedMaps:0 CompletedReds:0 ContAlloc:1 ContRel:0
HostLocal:0 RackLocal:0
2016-09-27 00:15:58,865 INFO [AsyncDispatcher event handler]
org.apache.hadoop.yarn.util.RackResolver: Resolved hdp-dn5026.hadoop.local
to /bk04
2016-09-27 00:15:58,884 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl: The job-jar
file on the remote FS is
hdfs://nameservice1/tmp/hadoop-yarn/staging/kylin/.staging/job_1472454550517_40237/job.jar
2016-09-27 00:15:58,886 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl: The job-conf
file on the remote FS is
/tmp/hadoop-yarn/staging/kylin/.staging/job_1472454550517_40237/job.xml
2016-09-27 00:15:58,925 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl: Adding #1
tokens and #1 secret keys for NM use for launching container
2016-09-27 00:15:58,925 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl: Size of
containertokens_dob is 2
2016-09-27 00:15:58,926 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl: Putting
shuffle token in serviceData
2016-09-27 00:15:59,069 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl:
attempt_1472454550517_40237_m_000000_0 TaskAttempt Transitioned from
UNASSIGNED to ASSIGNED
2016-09-27 00:15:59,074 INFO [ContainerLauncher #0]
org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherImpl:
Processing the event EventType: CONTAINER_REMOTE_LAUNCH for container
container_1472454550517_40237_01_000002 taskAttempt
attempt_1472454550517_40237_m_000000_0
2016-09-27 00:15:59,077 INFO [ContainerLauncher #0]
org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherImpl:
Launching attempt_1472454550517_40237_m_000000_0
2016-09-27 00:15:59,078 INFO [ContainerLauncher #0]
org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy:
Opening proxy : hdp-dn5026.hadoop.local:45454
2016-09-27 00:15:59,166 INFO [ContainerLauncher #0]
org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherImpl: Shuffle
port returned by ContainerManager for
attempt_1472454550517_40237_m_000000_0 : 13562
2016-09-27 00:15:59,168 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl: TaskAttempt:
[attempt_1472454550517_40237_m_000000_0] using containerId:
[container_1472454550517_40237_01_000002 on NM:
[hdp-dn5026.hadoop.local:45454]
2016-09-27 00:15:59,173 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl:
attempt_1472454550517_40237_m_000000_0 TaskAttempt Transitioned from
ASSIGNED to RUNNING
2016-09-27 00:15:59,173 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskImpl:
task_1472454550517_40237_m_000000 Task Transitioned from SCHEDULED to
RUNNING
2016-09-27 00:15:59,706 INFO [Socket Reader #1 for port 33743]
SecurityLogger.org.apache.hadoop.ipc.Server: Auth successful for kylin/
[email protected] (auth:SIMPLE)
2016-09-27 00:15:59,717 INFO [Socket Reader #1 for port 33743]
SecurityLogger.org.apache.hadoop.security.authorize.ServiceAuthorizationManager:
Authorization successful for kylin/
[email protected] (auth:TOKEN) for
protocol=interface org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB
2016-09-27 00:15:59,808 INFO [RMCommunicator Allocator]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor: getResources()
for application_1472454550517_40237: ask=5 release= 0 newContainers=0
finishedContainers=0 resourcelimit=<memory:4736, vCores:1> knownNMs=40
2016-09-27 00:16:04,643 INFO [Socket Reader #1 for port 33397]
SecurityLogger.org.apache.hadoop.ipc.Server: Auth successful for
job_1472454550517_40237 (auth:SIMPLE)
2016-09-27 00:16:04,653 INFO [Socket Reader #1 for port 33397]
SecurityLogger.org.apache.hadoop.security.authorize.ServiceAuthorizationManager:
Authorization successful for job_1472454550517_40237 (auth:TOKEN) for
protocol=interface org.apache.hadoop.mapred.TaskUmbilicalProtocol
2016-09-27 00:16:04,665 INFO [IPC Server handler 2 on 33397]
org.apache.hadoop.mapred.TaskAttemptListenerImpl: JVM with ID :
jvm_1472454550517_40237_m_000002 asked for a task
2016-09-27 00:16:04,666 INFO [IPC Server handler 2 on 33397]
org.apache.hadoop.mapred.TaskAttemptListenerImpl: JVM with ID:
jvm_1472454550517_40237_m_000002 given task:
attempt_1472454550517_40237_m_000000_0
2016-09-27 00:16:05,927 INFO [IPC Server handler 0 on 33397]
org.apache.hadoop.mapred.TaskAttemptListenerImpl: Progress of TaskAttempt
attempt_1472454550517_40237_m_000000_0 is : 0.0
2016-09-27 00:16:05,940 ERROR [IPC Server handler 3 on 33397]
org.apache.hadoop.mapred.TaskAttemptListenerImpl: Task:
attempt_1472454550517_40237_m_000000_0 - exited : java.io.IOException:
Deserialization error: org.apache.hadoop.hive.metastore.api.Table; local
class incompatible: stream classdesc serialVersionUID =
7046373721250106722, local class serialVersionUID = 398473631015277182
at org.apache.hive.hcatalog.common.HCatUtil.deserialize(HCatUtil.java:120)
at
org.apache.hive.hcatalog.mapreduce.HCatBaseInputFormat.createRecordReader(HCatBaseInputFormat.java:183)
at
org.apache.hadoop.mapred.MapTask$NewTrackingRecordReader.<init>(MapTask.java:515)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:758)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341)
at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:168)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at
org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1709)
at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:162)
Caused by: java.io.InvalidClassException:
org.apache.hadoop.hive.metastore.api.Table; local class incompatible:
stream classdesc serialVersionUID = 7046373721250106722, local class
serialVersionUID = 398473631015277182
at java.io.ObjectStreamClass.initNonProxy(ObjectStreamClass.java:616)
at java.io.ObjectInputStream.readNonProxyDesc(ObjectInputStream.java:1623)
at java.io.ObjectInputStream.readClassDesc(ObjectInputStream.java:1518)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1774)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1351)
at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2000)
at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:1924)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1801)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1351)
at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2000)
at java.io.ObjectInputStream.defaultReadObject(ObjectInputStream.java:501)
at
org.apache.hive.hcatalog.mapreduce.InputJobInfo.readObject(InputJobInfo.java:181)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:497)
at java.io.ObjectStreamClass.invokeReadObject(ObjectStreamClass.java:1058)
at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:1900)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1801)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1351)
at java.io.ObjectInputStream.readObject(ObjectInputStream.java:371)
at org.apache.hive.hcatalog.common.HCatUtil.deserialize(HCatUtil.java:118)
... 9 more

2016-09-27 00:16:05,940 INFO [IPC Server handler 3 on 33397]
org.apache.hadoop.mapred.TaskAttemptListenerImpl: Diagnostics report from
attempt_1472454550517_40237_m_000000_0: Error: java.io.IOException:
Deserialization error: org.apache.hadoop.hive.metastore.api.Table; local
class incompatible: stream classdesc serialVersionUID =
7046373721250106722, local class serialVersionUID = 398473631015277182
at org.apache.hive.hcatalog.common.HCatUtil.deserialize(HCatUtil.java:120)
at
org.apache.hive.hcatalog.mapreduce.HCatBaseInputFormat.createRecordReader(HCatBaseInputFormat.java:183)
at
org.apache.hadoop.mapred.MapTask$NewTrackingRecordReader.<init>(MapTask.java:515)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:758)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341)
at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:168)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at
org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1709)
at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:162)
Caused by: java.io.InvalidClassException:
org.apache.hadoop.hive.metastore.api.Table; local class incompatible:
stream classdesc serialVersionUID = 7046373721250106722, local class
serialVersionUID = 398473631015277182
at java.io.ObjectStreamClass.initNonProxy(ObjectStreamClass.java:616)
at java.io.ObjectInputStream.readNonProxyDesc(ObjectInputStream.java:1623)
at java.io.ObjectInputStream.readClassDesc(ObjectInputStream.java:1518)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1774)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1351)
at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2000)
at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:1924)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1801)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1351)
at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2000)
at java.io.ObjectInputStream.defaultReadObject(ObjectInputStream.java:501)
at
org.apache.hive.hcatalog.mapreduce.InputJobInfo.readObject(InputJobInfo.java:181)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:497)
at java.io.ObjectStreamClass.invokeReadObject(ObjectStreamClass.java:1058)
at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:1900)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1801)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1351)
at java.io.ObjectInputStream.readObject(ObjectInputStream.java:371)
at org.apache.hive.hcatalog.common.HCatUtil.deserialize(HCatUtil.java:118)
... 9 more

2016-09-27 00:16:05,943 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl: Diagnostics
report from attempt_1472454550517_40237_m_000000_0: Error:
java.io.IOException: Deserialization error:
org.apache.hadoop.hive.metastore.api.Table; local class incompatible:
stream classdesc serialVersionUID = 7046373721250106722, local class
serialVersionUID = 398473631015277182
at org.apache.hive.hcatalog.common.HCatUtil.deserialize(HCatUtil.java:120)
at
org.apache.hive.hcatalog.mapreduce.HCatBaseInputFormat.createRecordReader(HCatBaseInputFormat.java:183)
at
org.apache.hadoop.mapred.MapTask$NewTrackingRecordReader.<init>(MapTask.java:515)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:758)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341)
at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:168)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at
org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1709)
at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:162)
Caused by: java.io.InvalidClassException:
org.apache.hadoop.hive.metastore.api.Table; local class incompatible:
stream classdesc serialVersionUID = 7046373721250106722, local class
serialVersionUID = 398473631015277182
at java.io.ObjectStreamClass.initNonProxy(ObjectStreamClass.java:616)
at java.io.ObjectInputStream.readNonProxyDesc(ObjectInputStream.java:1623)
at java.io.ObjectInputStream.readClassDesc(ObjectInputStream.java:1518)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1774)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1351)
at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2000)
at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:1924)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1801)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1351)
at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2000)
at java.io.ObjectInputStream.defaultReadObject(ObjectInputStream.java:501)
at
org.apache.hive.hcatalog.mapreduce.InputJobInfo.readObject(InputJobInfo.java:181)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:497)
at java.io.ObjectStreamClass.invokeReadObject(ObjectStreamClass.java:1058)
at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:1900)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1801)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1351)
at java.io.ObjectInputStream.readObject(ObjectInputStream.java:371)
at org.apache.hive.hcatalog.common.HCatUtil.deserialize(HCatUtil.java:118)
... 9 more

2016-09-27 00:16:05,944 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl:
attempt_1472454550517_40237_m_000000_0 TaskAttempt Transitioned from
RUNNING to FAIL_CONTAINER_CLEANUP
2016-09-27 00:16:05,946 INFO [ContainerLauncher #1]
org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherImpl:
Processing the event EventType: CONTAINER_REMOTE_CLEANUP for container
container_1472454550517_40237_01_000002 taskAttempt
attempt_1472454550517_40237_m_000000_0
2016-09-27 00:16:05,947 INFO [ContainerLauncher #1]
org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherImpl: KILLING
attempt_1472454550517_40237_m_000000_0
2016-09-27 00:16:05,947 INFO [ContainerLauncher #1]
org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy:
Opening proxy : hdp-dn5026.hadoop.local:45454
2016-09-27 00:16:05,969 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl:
attempt_1472454550517_40237_m_000000_0 TaskAttempt Transitioned from
FAIL_CONTAINER_CLEANUP to FAIL_TASK_CLEANUP
2016-09-27 00:16:05,970 INFO [CommitterEvent Processor #1]
org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler: Processing
the event EventType: TASK_ABORT
2016-09-27 00:16:05,976 WARN [CommitterEvent Processor #1]
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter: Could not
delete
hdfs://nameservice1/user/kylin/kylin/kylin_metadata/kylin-60a2fc6f-6eb3-4cb4-b8e3-8e3a00038891/RAT_Cube2/fact_distinct_columns/_temporary/1/_temporary/attempt_1472454550517_40237_m_000000_0
2016-09-27 00:16:05,978 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl:
attempt_1472454550517_40237_m_000000_0 TaskAttempt Transitioned from
FAIL_TASK_CLEANUP to FAILED
2016-09-27 00:16:05,987 INFO [AsyncDispatcher event handler]
org.apache.hadoop.yarn.util.RackResolver: Resolved hdp-dn5006.hadoop.local
to /bk01
2016-09-27 00:16:05,987 INFO [AsyncDispatcher event handler]
org.apache.hadoop.yarn.util.RackResolver: Resolved hdp-dn5031.hadoop.local
to /bk05
2016-09-27 00:16:05,988 INFO [Thread-55]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor: 1 failures on
node hdp-dn5026.hadoop.local
2016-09-27 00:16:05,990 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl:
attempt_1472454550517_40237_m_000000_1 TaskAttempt Transitioned from NEW to
UNASSIGNED
2016-09-27 00:16:05,991 INFO [Thread-55]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator: Added
attempt_1472454550517_40237_m_000000_1 to list of failed maps
2016-09-27 00:16:06,818 INFO [RMCommunicator Allocator]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator: Before
Scheduling: PendingReds:10 ScheduledMaps:1 ScheduledReds:0 AssignedMaps:1
AssignedReds:0 CompletedMaps:0 CompletedReds:0 ContAlloc:1 ContRel:0
HostLocal:0 RackLocal:0
2016-09-27 00:16:06,828 INFO [RMCommunicator Allocator]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor: getResources()
for application_1472454550517_40237: ask=1 release= 0 newContainers=0
finishedContainers=1 resourcelimit=<memory:9472, vCores:1> knownNMs=40
2016-09-27 00:16:06,828 INFO [RMCommunicator Allocator]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator: Received
completed container container_1472454550517_40237_01_000002
2016-09-27 00:16:06,829 INFO [RMCommunicator Allocator]
org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator: Recalculating
schedule, headroom=<memory:9472, vCores:1>
2016-09-27 00:16:06,829 INFO [AsyncDispatcher event handler]
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl: Diagnostics
report from attempt_1472454550517_40237_m_000000_0: Container killed by the
ApplicationMaster.
Container killed on request. Exit code is 143
Container exited with a non-zero exit code 143

On Tue, Sep 27, 2016 at 7:53 AM, Ashika Umanga Umagiliya <
[email protected]> wrote:

> Greetings,
>
> I successfully managed to install Kylin and build the sample cube.
> I created my own cube using a relatively large Hive table, and during cube
> generation, in the step "#3 Step Name: Extract Fact Table Distinct Columns"
> the MR job fails. (I tried 3 times and it failed every time, giving the same
> error.)
>
> MR log is as follows :
>
>
> 2016-09-26 09:32:09,728 INFO [main] 
> org.apache.hadoop.mapreduce.v2.app.MRAppMaster:
> Created MRAppMaster for application appattempt_1472454550517_38668_000001
> 2016-09-26 09:32:09,958 WARN [main] org.apache.hadoop.util.NativeCodeLoader:
> Unable to load native-hadoop library for your platform... using
> builtin-java classes where applicable
> 2016-09-26 09:32:09,993 INFO [main] 
> org.apache.hadoop.mapreduce.v2.app.MRAppMaster:
> Executing with tokens:
> 2016-09-26 09:32:10,229 INFO [main] 
> org.apache.hadoop.mapreduce.v2.app.MRAppMaster:
> Kind: YARN_AM_RM_TOKEN, Service: , Ident: (appAttemptId { application_id {
> id: 38668 cluster_timestamp: 1472454550517 } attemptId: 1 } keyId:
> -991094474)
> 2016-09-26 09:32:10,230 INFO [main] 
> org.apache.hadoop.mapreduce.v2.app.MRAppMaster:
> Kind: HDFS_DELEGATION_TOKEN, Service: ha-hdfs:nameservice1, Ident:
> (HDFS_DELEGATION_TOKEN token 222750 for kylin)
> 2016-09-26 09:32:10,246 INFO [main] 
> org.apache.hadoop.mapreduce.v2.app.MRAppMaster:
> Using mapred newApiCommitter.
> 2016-09-26 09:32:10,248 INFO [main] 
> org.apache.hadoop.mapreduce.v2.app.MRAppMaster:
> OutputCommitter set in config null
> 2016-09-26 09:32:10,285 INFO [main] org.apache.hadoop.mapreduce.
> lib.output.FileOutputCommitter: File Output Committer Algorithm version
> is 1
> 2016-09-26 09:32:10,286 INFO [main] org.apache.hadoop.mapreduce.
> lib.output.FileOutputCommitter: FileOutputCommitter skip cleanup
> _temporary folders under output directory:false, ignore cleanup failures:
> false
> 2016-09-26 09:32:10,804 WARN [main] 
> org.apache.hadoop.hdfs.shortcircuit.DomainSocketFactory:
> The short-circuit local reads feature cannot be used because libhadoop
> cannot be loaded.
> 2016-09-26 09:32:10,810 INFO [main] 
> org.apache.hadoop.mapreduce.v2.app.MRAppMaster:
> OutputCommitter is org.apache.hadoop.mapreduce.
> lib.output.FileOutputCommitter
> 2016-09-26 09:32:10,866 WARN [main] org.apache.hadoop.ipc.Client:
> Exception encountered while connecting to the server :
> org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.ipc.StandbyException):
> Operation category READ is not supported in state standby
> at org.apache.hadoop.security.SaslRpcClient.saslConnect(
> SaslRpcClient.java:375)
> at org.apache.hadoop.ipc.Client$Connection.setupSaslConnection(Client.
> java:563)
> at org.apache.hadoop.ipc.Client$Connection.access$1900(Client.java:378)
> at org.apache.hadoop.ipc.Client$Connection$2.run(Client.java:732)
> at org.apache.hadoop.ipc.Client$Connection$2.run(Client.java:728)
> at java.security.AccessController.doPrivileged(Native Method)
> at javax.security.auth.Subject.doAs(Subject.java:422)
> at org.apache.hadoop.security.UserGroupInformation.doAs(
> UserGroupInformation.java:1709)
> at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:727)
> at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
> at org.apache.hadoop.ipc.Client.getConnection(Client.java:1492)
> at org.apache.hadoop.ipc.Client.call(Client.java:1402)
> at org.apache.hadoop.ipc.Client.call(Client.java:1363)
> at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.
> invoke(ProtobufRpcEngine.java:229)
> at com.sun.proxy.$Proxy10.getFileInfo(Unknown Source)
> at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslat
> orPB.getFileInfo(ClientNamenodeProtocolTranslatorPB.java:773)
> at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
> at sun.reflect.NativeMethodAccessorImpl.invoke(
> NativeMethodAccessorImpl.java:62)
> at sun.reflect.DelegatingMethodAccessorImpl.invoke(
> DelegatingMethodAccessorImpl.java:43)
> at java.lang.reflect.Method.invoke(Method.java:497)
> at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(
> RetryInvocationHandler.java:256)
> at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(
> RetryInvocationHandler.java:104)
> at com.sun.proxy.$Proxy11.getFileInfo(Unknown Source)
> at org.apache.hadoop.hdfs.DFSClient.getFileInfo(DFSClient.java:2162)
> at org.apache.hadoop.hdfs.DistributedFileSystem$24.
> doCall(DistributedFileSystem.java:1363)
> at org.apache.hadoop.hdfs.DistributedFileSystem$24.
> doCall(DistributedFileSystem.java:1359)
> at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(
> FileSystemLinkResolver.java:81)
> at org.apache.hadoop.hdfs.DistributedFileSystem.getFileStatus(
> DistributedFileSystem.java:1359)
> at org.apache.hadoop.fs.FileSystem.exists(FileSystem.java:1424)
> at org.apache.hadoop.mapreduce.v2.app.MRAppMaster.
> serviceInit(MRAppMaster.java:291)
> at org.apache.hadoop.service.AbstractService.init(
> AbstractService.java:163)
> at org.apache.hadoop.mapreduce.v2.app.MRAppMaster$5.run(
> MRAppMaster.java:1556)
> at java.security.AccessController.doPrivileged(Native Method)
> at javax.security.auth.Subject.doAs(Subject.java:422)
> at org.apache.hadoop.security.UserGroupInformation.doAs(
> UserGroupInformation.java:1709)
> at org.apache.hadoop.mapreduce.v2.app.MRAppMaster.initAndStartAppMaster(
> MRAppMaster.java:1553)
> at org.apache.hadoop.mapreduce.v2.app.MRAppMaster.main(
> MRAppMaster.java:1486)
> 2016-09-26 09:32:10,991 INFO [main] org.apache.hadoop.yarn
>
>
>


-- 
Umanga
http://jp.linkedin.com/in/umanga
http://umanga.ifreepages.com

Reply via email to