See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/517/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 826746 lines...]
    [junit]     at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit]     at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:389)
    [junit]     at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit]     at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit]     at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit]     at java.lang.Thread.sleep(Native Method)
    [junit]     at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit]     ... 11 more
    [junit] 2010-12-11 13:21:11,346 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-11 13:21:11,411 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-11 13:21:11,446 INFO  datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:60926, storageID=DS-217843813-127.0.1.1-60926-1292073660370, infoPort=42538, ipcPort=56175):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-11 13:21:11,446 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 56175
    [junit] 2010-12-11 13:21:11,447 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-11 13:21:11,447 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-11 13:21:11,447 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-11 13:21:11,447 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-11 13:21:11,549 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-11 13:21:11,549 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 2 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 4 4
    [junit] 2010-12-11 13:21:11,550 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-11 13:21:11,551 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 38934
    [junit] 2010-12-11 13:21:11,551 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 38934: exiting
    [junit] 2010-12-11 13:21:11,551 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 38934: exiting
    [junit] 2010-12-11 13:21:11,552 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 38934: exiting
    [junit] 2010-12-11 13:21:11,552 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 38934: exiting
    [junit] 2010-12-11 13:21:11,554 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 38934: exiting
    [junit] 2010-12-11 13:21:11,552 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 38934: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.17 sec
    [junit] 2010-12-11 13:21:11,552 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 38934: exiting
    [junit] 2010-12-11 13:21:11,552 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-11 13:21:11,551 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 38934
    [junit] 2010-12-11 13:21:11,554 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 38934: exiting
    [junit] 2010-12-11 13:21:11,554 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 38934: exiting
    [junit] 2010-12-11 13:21:11,553 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 38934: exiting

checkfailure:

run-test-hdfs-all-withtestcaseonly:

run-test-hdfs:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:724: Tests failed!

Total time: 105 minutes 22 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08

Error Message:
Wrong number of PendingReplication blocks expected:<2> but was:<1>

Stack Trace:
junit.framework.AssertionFailedError: Wrong number of PendingReplication blocks expected:<2> but was:<1>
        at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2j2e00jqf1(TestBlockReport.java:414)
        at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08(TestBlockReport.java:390)
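
The "expected:<2> but was:<1>" text is the standard JUnit 3 assertEquals output, with the test's own message prepended. A minimal standalone sketch of that failure shape, using a hypothetical stand-in for the observed counter (this is not the TestBlockReport source):

    import junit.framework.TestCase;

    // Sketch only: reproduces the shape of the failure above. An assertEquals
    // with a message prefix compares the expected pending-replication count (2)
    // against an observed count; a hard-coded 1 stands in for the real counter.
    public class PendingReplicationAssertSketch extends TestCase {
        public void testPendingReplicationCount() {
            int observedPendingReplicationBlocks = 1; // hypothetical stand-in
            assertEquals("Wrong number of PendingReplication blocks",
                    2, observedPendingReplicationBlocks);
        }
    }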


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
        at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1523)
        at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1388)
        at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1334)
        at org.apache.hadoop.conf.Configuration.set(Configuration.java:577)
        at org.apache.hadoop.conf.Configuration.setBoolean(Configuration.java:781)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:313)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tqtz(TestFileConcurrentReader.java:275)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
        at java.io.FileInputStream.open(Native Method)
        at java.io.FileInputStream.<init>(FileInputStream.java:106)
        at java.io.FileInputStream.<init>(FileInputStream.java:66)
        at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
        at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
        at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
        at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
        at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
        at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
        at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
        at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
        at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
        at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
        at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1437)
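
The FileNotFoundException above is not about a missing file: "(Too many open files)" means the test JVM hit its per-process file-descriptor limit while re-opening hdfs-default.xml, so the actual descriptor leak is elsewhere in the test run. A minimal diagnostic sketch, assuming a Linux slave where /proc is available (class name and approach are illustrative, not part of the test):

    import java.io.File;

    // Sketch only: count the file descriptors currently held by this JVM by
    // listing /proc/self/fd. A count close to the `ulimit -n` limit is
    // consistent with the "Too many open files" error above.
    public class OpenFdCount {
        public static void main(String[] args) {
            String[] fds = new File("/proc/self/fd").list();
            System.out.println("open file descriptors: "
                    + (fds == null ? "unknown (not Linux?)" : fds.length));
        }
    }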


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of c473986841e7cc311c1abf192dea4637 but expecting ead7e9810b7bfce73a3e69e4661b15aa

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of c473986841e7cc311c1abf192dea4637 but expecting ead7e9810b7bfce73a3e69e4661b15aa
        at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
        at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tgn(TestStorageRestore.java:316)
        at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)
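
The checkpoint aborts because the fsimage under secondary/current does not hash to the MD5 digest that was expected for it. For comparing the two digests by hand, a minimal sketch that prints a file's MD5 as lowercase hex (class name and usage are illustrative, not taken from the Hadoop source):

    import java.io.FileInputStream;
    import java.io.InputStream;
    import java.security.MessageDigest;

    // Sketch only: compute the MD5 of the file given as the first argument,
    // e.g. a saved copy of the fsimage named in the error above, and print it
    // in the same lowercase-hex form as the digests in the message.
    public class FileMd5 {
        public static void main(String[] args) throws Exception {
            MessageDigest md = MessageDigest.getInstance("MD5");
            InputStream in = new FileInputStream(args[0]);
            try {
                byte[] buf = new byte[8192];
                for (int n; (n = in.read(buf)) > 0; ) {
                    md.update(buf, 0, n);
                }
            } finally {
                in.close();
            }
            StringBuilder hex = new StringBuilder();
            for (byte b : md.digest()) {
                hex.append(String.format("%02x", b & 0xff));
            }
            System.out.println(hex.toString());
        }
    }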


