See <https://builds.apache.org/job/Hadoop-Hdfs-trunk-Java8/1191/changes>
Changes:
[Arun Suresh] YARN-5073. Refactor startContainerInternal() in ContainerManager to
------------------------------------------
[...truncated 8869 lines...]
at org.apache.hadoop.ipc.Server$Responder.run(Server.java:1062)
"Timer-5" daemon prio=5 tid=83 timed_waiting
java.lang.Thread.State: TIMED_WAITING
at java.lang.Object.wait(Native Method)
at java.util.TimerThread.mainLoop(Timer.java:552)
at java.util.TimerThread.run(Timer.java:505)
"nioEventLoopGroup-3-1" prio=10 tid=129 timed_waiting
java.lang.Thread.State: TIMED_WAITING
at java.lang.Thread.sleep(Native Method)
at org.apache.hadoop.hdfs.DFSOutputStream.completeFile(DFSOutputStream.java:825)
at org.apache.hadoop.hdfs.DFSOutputStream.closeImpl(DFSOutputStream.java:784)
at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:755)
at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:72)
at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:101)
at org.apache.hadoop.io.IOUtils.cleanup(IOUtils.java:247)
at org.apache.hadoop.hdfs.server.datanode.web.webhdfs.HdfsWriter.releaseDfsResources(HdfsWriter.java:78)
at org.apache.hadoop.hdfs.server.datanode.web.webhdfs.HdfsWriter.channelRead0(HdfsWriter.java:60)
at org.apache.hadoop.hdfs.server.datanode.web.webhdfs.HdfsWriter.channelRead0(HdfsWriter.java:36)
at io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)
at io.netty.channel.ChannelHandlerInvokerUtil.invokeChannelReadNow(ChannelHandlerInvokerUtil.java:83)
at io.netty.channel.DefaultChannelHandlerInvoker.invokeChannelRead(DefaultChannelHandlerInvoker.java:153)
at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:157)
at io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)
at io.netty.channel.ChannelHandlerInvokerUtil.invokeChannelReadNow(ChannelHandlerInvokerUtil.java:83)
at io.netty.channel.DefaultChannelHandlerInvoker.invokeChannelRead(DefaultChannelHandlerInvoker.java:153)
at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:157)
at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:244)
at io.netty.channel.ChannelHandlerInvokerUtil.invokeChannelReadNow(ChannelHandlerInvokerUtil.java:83)
at io.netty.channel.DefaultChannelHandlerInvoker.invokeChannelRead(DefaultChannelHandlerInvoker.java:153)
at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:157)
at io.netty.handler.codec.ByteToMessageDecoder.handlerRemoved(ByteToMessageDecoder.java:203)
at io.netty.channel.DefaultChannelPipeline.callHandlerRemoved0(DefaultChannelPipeline.java:627)
at io.netty.channel.DefaultChannelPipeline.callHandlerRemoved(DefaultChannelPipeline.java:621)
at io.netty.channel.DefaultChannelPipeline.remove0(DefaultChannelPipeline.java:450)
at io.netty.channel.DefaultChannelPipeline.remove(DefaultChannelPipeline.java:421)
at io.netty.channel.DefaultChannelPipeline.remove(DefaultChannelPipeline.java:398)
at org.apache.hadoop.hdfs.server.datanode.web.PortUnificationServerHandler.decode(PortUnificationServerHandler.java:96)
at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:327)
at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:230)
at io.netty.channel.ChannelHandlerInvokerUtil.invokeChannelReadNow(ChannelHandlerInvokerUtil.java:83)
at io.netty.channel.DefaultChannelHandlerInvoker.invokeChannelRead(DefaultChannelHandlerInvoker.java:153)
at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:157)
at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:946)
at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:127)
at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:510)
at io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:467)
at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:381)
at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:353)
at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:703)
at io.netty.util.concurrent.DefaultThreadFactory$DefaultRunnableDecorator.run(DefaultThreadFactory.java:137)
at java.lang.Thread.run(Thread.java:744)
"org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@61ce23ac"
daemon prio=5 tid=69 timed_waiting
java.lang.Thread.State: TIMED_WAITING
at java.lang.Thread.sleep(Native Method)
at
org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:3818)
at java.lang.Thread.run(Thread.java:744)
"IPC Server handler 2 on 52102" daemon prio=5 tid=98 timed_waiting
java.lang.Thread.State: TIMED_WAITING
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
at java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:467)
at org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:218)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2387)
"IPC Server handler 6 on 32787" daemon prio=5 tid=60 timed_waiting
java.lang.Thread.State: TIMED_WAITING
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
at java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:467)
at org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:218)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2387)
"org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3cc41abc" daemon
prio=5 tid=67 timed_waiting
java.lang.Thread.State: TIMED_WAITING
at java.lang.Thread.sleep(Native Method)
at
org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:339)
at java.lang.Thread.run(Thread.java:744)
"IPC Server handler 3 on 52102" daemon prio=5 tid=99 timed_waiting
java.lang.Thread.State: TIMED_WAITING
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
at java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:467)
at org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:218)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2387)
"IPC Server handler 6 on 52102" daemon prio=5 tid=102 timed_waiting
java.lang.Thread.State: TIMED_WAITING
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
at java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:467)
at org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:218)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2387)
"Block report processor" daemon prio=5 tid=40 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
at sun.misc.Unsafe.park(Native Method)
Tests run: 2, Failures: 0, Errors: 1, Skipped: 0, Time elapsed: 16.744 sec <<< FAILURE! - in org.apache.hadoop.hdfs.security.TestDelegationTokenForProxyUser
testWebHdfsDoAs(org.apache.hadoop.hdfs.security.TestDelegationTokenForProxyUser)  Time elapsed: 5.02 sec <<< ERROR!
java.lang.Exception: test timed out after 5000 milliseconds
at java.net.SocketInputStream.socketRead0(Native Method)
at java.net.SocketInputStream.read(SocketInputStream.java:150)
at java.net.SocketInputStream.read(SocketInputStream.java:121)
at java.io.BufferedInputStream.fill(BufferedInputStream.java:246)
at java.io.BufferedInputStream.read1(BufferedInputStream.java:286)
at java.io.BufferedInputStream.read(BufferedInputStream.java:345)
at sun.net.www.http.HttpClient.parseHTTPHeader(HttpClient.java:701)
at sun.net.www.http.HttpClient.parseHTTPHeader(HttpClient.java:812)
at sun.net.www.http.HttpClient.parseHTTP(HttpClient.java:647)
at sun.net.www.protocol.http.HttpURLConnection.getInputStream0(HttpURLConnection.java:1534)
at sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1439)
at java.net.HttpURLConnection.getResponseCode(HttpURLConnection.java:480)
at org.apache.hadoop.hdfs.web.WebHdfsFileSystem.validateResponse(WebHdfsFileSystem.java:430)
at org.apache.hadoop.hdfs.web.WebHdfsFileSystem.access$200(WebHdfsFileSystem.java:108)
at org.apache.hadoop.hdfs.web.WebHdfsFileSystem$FsPathOutputStreamRunner$1.close(WebHdfsFileSystem.java:910)
at org.apache.hadoop.hdfs.security.TestDelegationTokenForProxyUser.testWebHdfsDoAs(TestDelegationTokenForProxyUser.java:170)
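The 5.02 s error above is JUnit's per-test timeout firing rather than an assertion failure: the WebHDFS request blocked in socketRead0 until the 5000 ms bound was hit. As a minimal sketch of the mechanism only (hypothetical class and method names, not the actual TestDelegationTokenForProxyUser source), a JUnit 4 per-test timeout is declared like this:

    import org.junit.Test;

    public class TimeoutSketch {
        // JUnit 4 runs the test body on a watched thread and fails it with
        // "test timed out after 5000 milliseconds" if it overruns the bound.
        @Test(timeout = 5000)
        public void blocksTooLong() throws Exception {
            Thread.sleep(10_000); // stands in for the stuck HTTP read above
        }
    }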
Tests run: 7, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 45.657 sec - in org.apache.hadoop.hdfs.security.TestDelegationToken
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=768m; support was removed in 8.0
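A note on the HotSpot warning above, which recurs for every forked test JVM below: Java 8 removed the permanent generation, so -XX:MaxPermSize is parsed but ignored, and the warning is harmless. If a cap on class-metadata memory is actually wanted on JDK 8, the closest equivalent flag would be -XX:MaxMetaspaceSize (for example -XX:MaxMetaspaceSize=768m); treat that as a suggested substitution, not something this build configures.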
Running org.apache.hadoop.hdfs.TestDFSInputStream
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=768m; support was removed in 8.0
Running org.apache.hadoop.hdfs.TestFileAppend4
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 25.866 sec - in org.apache.hadoop.hdfs.TestFileCorruption
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=768m; support was removed in 8.0
Running org.apache.hadoop.hdfs.TestHdfsAdmin
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 32.386 sec - in org.apache.hadoop.hdfs.TestDFSInputStream
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.491 sec - in org.apache.hadoop.hdfs.TestHdfsAdmin
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=768m; support was removed in 8.0
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=768m; support was removed in 8.0
Running org.apache.hadoop.hdfs.client.impl.TestClientBlockVerification
Running org.apache.hadoop.hdfs.client.impl.TestBlockReaderFactory
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.479 sec - in org.apache.hadoop.hdfs.client.impl.TestClientBlockVerification
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=768m; support was removed in 8.0
Running org.apache.hadoop.hdfs.client.impl.TestBlockReaderLocal
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 53.044 sec - in org.apache.hadoop.hdfs.TestFileAppend4
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=768m; support was removed in 8.0
Tests run: 7, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 28.758 sec - in org.apache.hadoop.hdfs.client.impl.TestBlockReaderFactory
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=768m; support was removed in 8.0
Running org.apache.hadoop.hdfs.client.impl.TestBlockReaderLocalLegacy
Running org.apache.hadoop.hdfs.client.impl.TestBlockReaderRemote
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.75 sec - in org.apache.hadoop.hdfs.client.impl.TestBlockReaderRemote
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=768m; support was removed in 8.0
Running org.apache.hadoop.hdfs.client.impl.TestBlockReaderRemote2
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 18.438 sec - in org.apache.hadoop.hdfs.client.impl.TestBlockReaderLocalLegacy
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=768m; support was removed in 8.0
Running org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.958 sec - in org.apache.hadoop.hdfs.client.impl.TestBlockReaderRemote2
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=768m; support was removed in 8.0
Running org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure120
Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.036 sec - in org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure120
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=768m; support was removed in 8.0
Running org.apache.hadoop.hdfs.TestSnapshotCommands
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.071 sec - in org.apache.hadoop.hdfs.TestSnapshotCommands
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=768m; support was removed in 8.0
Running org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure130
Results :
Tests in error:
TestAsyncDFSRename.testAggressiveConcurrentAsyncRenameWithOverwrite:184->internalTestConcurrentAsyncRenameWithOverwrite:220 »
TestAsyncDFSRename.testCallGetReturnValueMultipleTimes:125 » IO Cannot remove ...
TestAsyncDFSRename.testConservativeConcurrentAsyncRenameWithOverwrite:191->internalTestConcurrentAsyncRenameWithOverwrite:220->Object.wait:-2 »
TestEditLogTailer.testNN1TriggersLogRolls:146->testStandbyTriggersLogRolls:178->waitForLogRollInSharedDir:199 » Timeout
TestDataNodeUUID.testUUIDRegeneration:89 » test timed out after 10000 millise...
TestDelegationTokenForProxyUser.testWebHdfsDoAs:170 » test timed out after 50...
Tests run: 3914, Failures: 0, Errors: 6, Skipped: 14
[INFO]
[INFO] ------------------------------------------------------------------------
[INFO] Skipping Apache Hadoop HDFS Native Client
[INFO] This project has been banned from the build due to previous failures.
[INFO] ------------------------------------------------------------------------
[INFO]
[INFO] ------------------------------------------------------------------------
[INFO] Skipping Apache Hadoop HttpFS
[INFO] This project has been banned from the build due to previous failures.
[INFO] ------------------------------------------------------------------------
[INFO]
[INFO] ------------------------------------------------------------------------
[INFO] Skipping Apache Hadoop HDFS BookKeeper Journal
[INFO] This project has been banned from the build due to previous failures.
[INFO] ------------------------------------------------------------------------
[INFO]
[INFO] ------------------------------------------------------------------------
[INFO] Skipping Apache Hadoop HDFS-NFS
[INFO] This project has been banned from the build due to previous failures.
[INFO] ------------------------------------------------------------------------
[INFO]
[INFO] ------------------------------------------------------------------------
[INFO] Building Apache Hadoop HDFS Project 3.0.0-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO]
[INFO] --- maven-clean-plugin:2.5:clean (default-clean) @ hadoop-hdfs-project ---
[INFO] Deleting <https://builds.apache.org/job/Hadoop-Hdfs-trunk-Java8/ws/hadoop-hdfs-project/target>
[INFO]
[INFO] --- maven-antrun-plugin:1.7:run (create-testdirs) @ hadoop-hdfs-project ---
[INFO] Executing tasks
main:
    [mkdir] Created dir: <https://builds.apache.org/job/Hadoop-Hdfs-trunk-Java8/ws/hadoop-hdfs-project/target/test-dir>
[INFO] Executed tasks
[INFO]
[INFO] --- maven-source-plugin:2.3:jar-no-fork (hadoop-java-sources) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-source-plugin:2.3:test-jar-no-fork (hadoop-java-sources) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-enforcer-plugin:1.3.1:enforce (dist-enforce) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-site-plugin:3.5:attach-descriptor (attach-descriptor) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-javadoc-plugin:2.8.1:jar (module-javadocs) @ hadoop-hdfs-project ---
[INFO] Not executing Javadoc as the project is not a Java classpath-capable package
[INFO]
[INFO] --- maven-enforcer-plugin:1.3.1:enforce (depcheck) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-checkstyle-plugin:2.15:checkstyle (default-cli) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- findbugs-maven-plugin:3.0.0:findbugs (default-cli) @ hadoop-hdfs-project ---
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO]
[INFO] Apache Hadoop HDFS Client ......................... SUCCESS [06:25 min]
[INFO] Apache Hadoop HDFS ................................ FAILURE [ 01:29 h]
[INFO] Apache Hadoop HDFS Native Client .................. SKIPPED
[INFO] Apache Hadoop HttpFS .............................. SKIPPED
[INFO] Apache Hadoop HDFS BookKeeper Journal ............. SKIPPED
[INFO] Apache Hadoop HDFS-NFS ............................ SKIPPED
[INFO] Apache Hadoop HDFS Project ........................ SUCCESS [ 0.513 s]
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:36 h
[INFO] Finished at: 2016-05-11T07:49:47+00:00
[INFO] Final Memory: 95M/963M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-hdfs: ExecutionException: java.lang.RuntimeException: java.lang.RuntimeException: java.io.IOException: Stream Closed -> [Help 1]
[ERROR]
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR]
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR]
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-hdfs
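To illustrate the resume hint above: <goals> stands for whatever goals the original invocation used, which this excerpt does not record. If they were, say, clean test, the resume command would be "mvn clean test -rf :hadoop-hdfs", which restarts the reactor at the failed hadoop-hdfs module instead of rebuilding the modules that already succeeded.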
Build step 'Execute shell' marked build as failure
Archiving artifacts
Recording test results
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]