See https://builds.apache.org/job/Hadoop-Hdfs-trunk/2796/
###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 7391 lines...]
[INFO] --- maven-clean-plugin:2.5:clean (default-clean) @ hadoop-hdfs-project ---
[INFO] Deleting /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/target
[INFO]
[INFO] --- maven-antrun-plugin:1.7:run (create-testdirs) @ hadoop-hdfs-project ---
[INFO] Executing tasks
main:
[mkdir] Created dir: /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/target/test-dir
[INFO] Executed tasks
[INFO]
[INFO] --- maven-source-plugin:2.3:jar-no-fork (hadoop-java-sources) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-source-plugin:2.3:test-jar-no-fork (hadoop-java-sources) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-enforcer-plugin:1.3.1:enforce (dist-enforce) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-site-plugin:3.4:attach-descriptor (attach-descriptor) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-javadoc-plugin:2.8.1:jar (module-javadocs) @ hadoop-hdfs-project ---
[INFO] Skipping javadoc generation
[INFO]
[INFO] --- maven-enforcer-plugin:1.3.1:enforce (depcheck) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-checkstyle-plugin:2.15:checkstyle (default-cli) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- findbugs-maven-plugin:3.0.0:findbugs (default-cli) @ hadoop-hdfs-project ---
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO]
[INFO] Apache Hadoop HDFS Client ......................... SUCCESS [06:51 min]
[INFO] Apache Hadoop HDFS ................................ FAILURE [ 07:27 h]
[INFO] Apache Hadoop HDFS Native Client .................. SKIPPED
[INFO] Apache Hadoop HttpFS .............................. SKIPPED
[INFO] Apache Hadoop HDFS BookKeeper Journal ............. SKIPPED
[INFO] Apache Hadoop HDFS-NFS ............................ SKIPPED
[INFO] Apache Hadoop HDFS Project ........................ SUCCESS [ 0.117 s]
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 07:34 h
[INFO] Finished at: 2016-02-03T11:26:32+00:00
[INFO] Final Memory: 70M/633M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-hdfs: There was a timeout or other error in the fork -> [Help 1]
[ERROR]
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR]
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR]
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR] mvn <goals> -rf :hadoop-hdfs
Build step 'Execute shell' marked build as failure
Archiving artifacts
Recording test results
Sending e-mails to: [email protected]
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any
###################################################################################
############################## FAILED TESTS (if any) ##############################
8 tests failed.
FAILED: org.apache.hadoop.hdfs.server.blockmanagement.TestUnderReplicatedBlocks.testSetrepIncWithUnderReplicatedBlocks
Error Message:
test timed out after 60000 milliseconds
Stack Trace:
java.lang.Exception: test timed out after 60000 milliseconds
at java.lang.Thread.sleep(Native Method)
at org.apache.hadoop.fs.shell.SetReplication.waitForReplication(SetReplication.java:127)
at org.apache.hadoop.fs.shell.SetReplication.processArguments(SetReplication.java:77)
at org.apache.hadoop.fs.shell.FsCommand.processRawArguments(FsCommand.java:119)
at org.apache.hadoop.fs.shell.Command.run(Command.java:166)
at org.apache.hadoop.fs.FsShell.run(FsShell.java:319)
at org.apache.hadoop.hdfs.server.blockmanagement.TestUnderReplicatedBlocks.testSetrepIncWithUnderReplicatedBlocks(TestUnderReplicatedBlocks.java:70)
FAILED: org.apache.hadoop.hdfs.server.datanode.TestBlockReplacement.testDeletedBlockWhenAddBlockIsInEdit
Error Message:
The block should be only on 1 datanode expected:<1> but was:<2>
Stack Trace:
java.lang.AssertionError: The block should be only on 1 datanode expected:<1> but was:<2>
at org.junit.Assert.fail(Assert.java:88)
at org.junit.Assert.failNotEquals(Assert.java:743)
at org.junit.Assert.assertEquals(Assert.java:118)
at org.junit.Assert.assertEquals(Assert.java:555)
at org.apache.hadoop.hdfs.server.datanode.TestBlockReplacement.testDeletedBlockWhenAddBlockIsInEdit(TestBlockReplacement.java:436)
FAILED: org.apache.hadoop.hdfs.server.datanode.TestBlockScanner.testVolumeIteratorWithCaching
Error Message:
test timed out after 60000 milliseconds
Stack Trace:
java.lang.Exception: test timed out after 60000 milliseconds
at sun.nio.ch.FileDispatcherImpl.write0(Native Method)
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:47)
at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:93)
at sun.nio.ch.IOUtil.write(IOUtil.java:65)
at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:487)
at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:63)
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:159)
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:117)
at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:82)
at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:140)
at java.io.DataOutputStream.flush(DataOutputStream.java:123)
at org.apache.hadoop.hdfs.protocol.datatransfer.Sender.send(Sender.java:79)
at org.apache.hadoop.hdfs.protocol.datatransfer.Sender.readBlock(Sender.java:112)
at org.apache.hadoop.hdfs.RemoteBlockReader2.newBlockReader(RemoteBlockReader2.java:404)
at org.apache.hadoop.hdfs.BlockReaderFactory.getRemoteBlockReader(BlockReaderFactory.java:842)
at org.apache.hadoop.hdfs.BlockReaderFactory.getRemoteBlockReaderFromTcp(BlockReaderFactory.java:733)
at org.apache.hadoop.hdfs.BlockReaderFactory.build(BlockReaderFactory.java:375)
at org.apache.hadoop.hdfs.DFSInputStream.getBlockReader(DFSInputStream.java:695)
at org.apache.hadoop.hdfs.DFSInputStream.blockSeekTo(DFSInputStream.java:654)
at org.apache.hadoop.hdfs.DFSInputStream.readWithStrategy(DFSInputStream.java:926)
at org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:979)
at org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:750)
at java.io.DataInputStream.readByte(DataInputStream.java:265)
at org.apache.hadoop.hdfs.DFSTestUtil.getFirstBlock(DFSTestUtil.java:783)
at org.apache.hadoop.hdfs.server.datanode.TestBlockScanner$TestContext.getFileBlock(TestBlockScanner.java:140)
at org.apache.hadoop.hdfs.server.datanode.TestBlockScanner.testVolumeIteratorImpl(TestBlockScanner.java:174)
at org.apache.hadoop.hdfs.server.datanode.TestBlockScanner.testVolumeIteratorWithCaching(TestBlockScanner.java:250)
FAILED: org.apache.hadoop.hdfs.server.datanode.TestDataNodeVolumeFailure.testUnderReplicationAfterVolFailure
Error Message:
There is no under replicated block after volume failure
Stack Trace:
java.lang.AssertionError: There is no under replicated block after volume failure
at org.junit.Assert.fail(Assert.java:88)
at org.junit.Assert.assertTrue(Assert.java:41)
at org.apache.hadoop.hdfs.server.datanode.TestDataNodeVolumeFailure.testUnderReplicationAfterVolFailure(TestDataNodeVolumeFailure.java:403)
FAILED: org.apache.hadoop.hdfs.server.datanode.TestDirectoryScanner.testThrottling
Error Message:
test timed out after 300000 milliseconds
Stack Trace:
java.lang.Exception: test timed out after 300000 milliseconds
at java.lang.Object.wait(Native Method)
at java.lang.Object.wait(Object.java:503)
at org.apache.hadoop.hdfs.DataStreamer.waitAndQueuePacket(DataStreamer.java:804)
at org.apache.hadoop.hdfs.DFSOutputStream.enqueueCurrentPacket(DFSOutputStream.java:423)
at org.apache.hadoop.hdfs.DFSOutputStream.enqueueCurrentPacketFull(DFSOutputStream.java:432)
at org.apache.hadoop.hdfs.DFSOutputStream.writeChunk(DFSOutputStream.java:418)
at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:217)
at org.apache.hadoop.fs.FSOutputSummer.write1(FSOutputSummer.java:125)
at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:111)
at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:57)
at java.io.DataOutputStream.write(DataOutputStream.java:107)
at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:418)
at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:376)
at org.apache.hadoop.hdfs.server.datanode.TestDirectoryScanner.createFile(TestDirectoryScanner.java:108)
at org.apache.hadoop.hdfs.server.datanode.TestDirectoryScanner.testThrottling(TestDirectoryScanner.java:584)
FAILED: org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestSpaceReservation.testTmpSpaceReserve
Error Message:
Wrong reserve space for Tmp expected:<200> but was:<1000>
Stack Trace:
java.lang.AssertionError: Wrong reserve space for Tmp expected:<200> but was:<1000>
at org.junit.Assert.fail(Assert.java:88)
at org.junit.Assert.failNotEquals(Assert.java:743)
at org.junit.Assert.assertEquals(Assert.java:118)
at org.junit.Assert.assertEquals(Assert.java:555)
at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestSpaceReservation.testTmpSpaceReserve(TestSpaceReservation.java:454)
FAILED: org.apache.hadoop.hdfs.server.namenode.TestLargeDirectoryDelete.largeDelete
Error Message:
org/apache/hadoop/util/IntrusiveCollection$IntrusiveIterator
Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/util/IntrusiveCollection$IntrusiveIterator
at org.apache.hadoop.util.IntrusiveCollection.iterator(IntrusiveCollection.java:213)
at org.apache.hadoop.util.IntrusiveCollection.clear(IntrusiveCollection.java:368)
at org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager.clearPendingCachingCommands(DatanodeManager.java:1580)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.stopActiveServices(FSNamesystem.java:1186)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.close(FSNamesystem.java:1531)
at org.apache.hadoop.hdfs.server.namenode.NameNode.stopCommonServices(NameNode.java:774)
at org.apache.hadoop.hdfs.server.namenode.NameNode.stop(NameNode.java:953)
at org.apache.hadoop.hdfs.MiniDFSCluster.stopAndJoinNameNode(MiniDFSCluster.java:1965)
at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:1911)
at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:1882)
at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:1875)
at org.apache.hadoop.hdfs.server.namenode.TestLargeDirectoryDelete.largeDelete(TestLargeDirectoryDelete.java:222)
FAILED: org.apache.hadoop.hdfs.server.namenode.ha.TestHAAppend.testMultipleAppendsDuringCatchupTailing
Error Message:
inode should complete in ~60000 ms.
Expected: is <true>
but: was <false>
Stack Trace:
java.lang.AssertionError: inode should complete in ~60000 ms.
Expected: is <true>
but: was <false>
at org.hamcrest.MatcherAssert.assertThat(MatcherAssert.java:20)
at org.junit.Assert.assertThat(Assert.java:865)
at org.apache.hadoop.hdfs.server.namenode.TestFileTruncate.checkBlockRecovery(TestFileTruncate.java:1196)
at org.apache.hadoop.hdfs.server.namenode.ha.TestHAAppend.testMultipleAppendsDuringCatchupTailing(TestHAAppend.java:125)