See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/521/
###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 821928 lines...]
[junit]     at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
[junit]     at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
[junit]     at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
[junit]     at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
[junit]     at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
[junit]     at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:389)
[junit]     at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
[junit]     at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
[junit]     at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
[junit]     at java.lang.Thread.run(Thread.java:619)
[junit] Caused by: java.lang.InterruptedException: sleep interrupted
[junit]     at java.lang.Thread.sleep(Native Method)
[junit]     at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
[junit]     ... 11 more
[junit] 2010-12-15 13:24:10,823 INFO datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 1
[junit] 2010-12-15 13:24:10,924 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
[junit] 2010-12-15 13:24:10,924 INFO datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:34915, storageID=DS-1460572231-127.0.1.1-34915-1292419439913, infoPort=56390, ipcPort=42973):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
[junit] 2010-12-15 13:24:10,924 INFO ipc.Server (Server.java:stop(1611)) - Stopping server on 42973
[junit] 2010-12-15 13:24:10,924 INFO datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
[junit] 2010-12-15 13:24:10,925 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
[junit] 2010-12-15 13:24:10,925 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
[junit] 2010-12-15 13:24:10,926 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
[junit] 2010-12-15 13:24:11,027 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
[junit] 2010-12-15 13:24:11,028 INFO namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 3
[junit] 2010-12-15 13:24:11,028 WARN namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
[junit] 2010-12-15 13:24:11,029 INFO ipc.Server (Server.java:stop(1611)) - Stopping server on 51182
[junit] 2010-12-15 13:24:11,029 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 51182: exiting
[junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.215 sec
[junit] 2010-12-15 13:24:11,029 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 51182: exiting
[junit] 2010-12-15 13:24:11,032 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 51182: exiting
[junit] 2010-12-15 13:24:11,030 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 51182
[junit] 2010-12-15 13:24:11,044 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 51182: exiting
[junit] 2010-12-15 13:24:11,030 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
[junit] 2010-12-15 13:24:11,045 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 51182: exiting
[junit] 2010-12-15 13:24:11,030 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 51182: exiting
[junit] 2010-12-15 13:24:11,030 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 51182: exiting
[junit] 2010-12-15 13:24:11,044 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 51182: exiting
[junit] 2010-12-15 13:24:11,044 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 51182: exiting
[junit] 2010-12-15 13:24:11,044 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 51182: exiting

checkfailure:

run-test-hdfs-all-withtestcaseonly:

run-test-hdfs:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:724: Tests failed!

Total time: 108 minutes 14 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure

###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.

REGRESSION: org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
    at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
    at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
    at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
    at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
    at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)

REGRESSION: org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
    at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
    at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
    at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
    at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
    at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)

FAILED: org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
    at sun.nio.ch.EPollArrayWrapper.epollCreate(Native Method)
    at sun.nio.ch.EPollArrayWrapper.<init>(EPollArrayWrapper.java:68)
    at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:52)
    at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
    at java.nio.channels.Selector.open(Selector.java:209)
    at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
    at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
    at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
    at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
    at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
    at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
    at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
    at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:416)
    at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:507)
    at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
    at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
    at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1561)
    at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
    at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
    at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
    at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
    at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)

FAILED: org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of ea0c381daf031b66603e1e64deda0a33 but expecting db35a63d2e6e5c3efbe94772b0cc1bc6

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of ea0c381daf031b66603e1e64deda0a33 but expecting db35a63d2e6e5c3efbe94772b0cc1bc6
    at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
    at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
    at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
    at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
    at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
    at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjd(TestStorageRestore.java:316)
    at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)