See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/514/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 808061 lines...]
    [junit]     at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit]     at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:389)
    [junit]     at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit]     at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit]     at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit]     at java.lang.Thread.sleep(Native Method)
    [junit]     at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit]     ... 11 more
    [junit] 2010-12-08 15:43:09,263 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-08 15:43:09,324 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-08 15:43:09,363 INFO  datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:46844, storageID=DS-2126632194-127.0.1.1-46844-1291822978276, infoPort=47150, ipcPort=34237):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-08 15:43:09,363 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 34237
    [junit] 2010-12-08 15:43:09,363 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-08 15:43:09,364 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-08 15:43:09,364 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-08 15:43:09,364 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-08 15:43:09,466 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-08 15:43:09,466 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 10 2
    [junit] 2010-12-08 15:43:09,466 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-08 15:43:09,467 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 60810
    [junit] 2010-12-08 15:43:09,468 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 60810: exiting
    [junit] 2010-12-08 15:43:09,468 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 60810: exiting
    [junit] 2010-12-08 15:43:09,468 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 60810: exiting
    [junit] 2010-12-08 15:43:09,469 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 60810: exiting
    [junit] 2010-12-08 15:43:09,469 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 60810: exiting
    [junit] 2010-12-08 15:43:09,469 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 60810: exiting
    [junit] 2010-12-08 15:43:09,468 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 60810: exiting
    [junit] 2010-12-08 15:43:09,468 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-08 15:43:09,468 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 60810
    [junit] 2010-12-08 15:43:09,469 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 60810: exiting
    [junit] 2010-12-08 15:43:09,469 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 60810: exiting
    [junit] 2010-12-08 15:43:09,469 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 60810: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 78.388 sec

checkfailure:

run-test-hdfs-all-withtestcaseonly:

run-test-hdfs:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:724: Tests failed!

Total time: 250 minutes 9 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
9 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileAppend4.testRecoverFinalizedBlock

Error Message:
test timed out after 60000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 60000 milliseconds
        at java.io.FileInputStream.readBytes(Native Method)
        at java.io.FileInputStream.read(FileInputStream.java:199)
        at java.io.BufferedInputStream.read1(BufferedInputStream.java:256)
        at java.io.BufferedInputStream.read(BufferedInputStream.java:317)
        at java.io.BufferedInputStream.fill(BufferedInputStream.java:218)
        at java.io.BufferedInputStream.read1(BufferedInputStream.java:258)
        at java.io.BufferedInputStream.read(BufferedInputStream.java:317)
        at sun.security.provider.SeedGenerator$URLSeedGenerator.getSeedByte(SeedGenerator.java:453)
        at sun.security.provider.SeedGenerator.getSeedBytes(SeedGenerator.java:123)
        at sun.security.provider.SeedGenerator.generateSeed(SeedGenerator.java:118)
        at sun.security.provider.SecureRandom.engineGenerateSeed(SecureRandom.java:114)
        at sun.security.provider.SecureRandom.engineNextBytes(SecureRandom.java:171)
        at java.security.SecureRandom.nextBytes(SecureRandom.java:433)
        at java.security.SecureRandom.next(SecureRandom.java:455)
        at java.util.Random.nextLong(Random.java:284)
        at org.mortbay.jetty.servlet.HashSessionIdManager.doStart(HashSessionIdManager.java:139)
        at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
        at org.mortbay.jetty.servlet.AbstractSessionManager.doStart(AbstractSessionManager.java:168)
        at org.mortbay.jetty.servlet.HashSessionManager.doStart(HashSessionManager.java:67)
        at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
        at org.mortbay.jetty.servlet.SessionHandler.doStart(SessionHandler.java:115)
        at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
        at org.mortbay.jetty.handler.HandlerWrapper.doStart(HandlerWrapper.java:130)
        at org.mortbay.jetty.handler.ContextHandler.startContext(ContextHandler.java:537)
        at org.mortbay.jetty.servlet.Context.startContext(Context.java:136)
        at org.mortbay.jetty.webapp.WebAppContext.startContext(WebAppContext.java:1234)
        at org.mortbay.jetty.handler.ContextHandler.doStart(ContextHandler.java:517)
        at org.mortbay.jetty.webapp.WebAppContext.doStart(WebAppContext.java:460)
        at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
        at org.mortbay.jetty.handler.HandlerCollection.doStart(HandlerCollection.java:152)
        at org.mortbay.jetty.handler.ContextHandlerCollection.doStart(ContextHandlerCollection.java:156)
        at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
        at org.mortbay.jetty.handler.HandlerWrapper.doStart(HandlerWrapper.java:130)
        at org.mortbay.jetty.Server.doStart(Server.java:222)
        at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
        at org.apache.hadoop.http.HttpServer.start(HttpServer.java:618)
        at org.apache.hadoop.hdfs.server.namenode.NameNode$1.run(NameNode.java:516)
        at org.apache.hadoop.hdfs.server.namenode.NameNode$1.run(NameNode.java:461)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:396)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1115)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.startHttpServer(NameNode.java:461)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.activate(NameNode.java:405)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:389)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:578)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:571)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1534)
        at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:445)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
        at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
        at org.apache.hadoop.hdfs.TestFileAppend4.__CLR3_0_21z1ppcxv6(TestFileAppend4.java:151)
        at org.apache.hadoop.hdfs.TestFileAppend4.testRecoverFinalizedBlock(TestFileAppend4.java:150)


REGRESSION:  org.apache.hadoop.hdfs.TestFileAppend4.testCompleteOtherLeaseHoldersFile

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
        at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
        at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
        at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
        at org.apache.hadoop.hdfs.TestFileAppend4.__CLR3_0_269ddf9xwa(TestFileAppend4.java:222)
        at org.apache.hadoop.hdfs.TestFileAppend4.testCompleteOtherLeaseHoldersFile(TestFileAppend4.java:221)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
        at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
        at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
        at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
        at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
        at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
        at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
        at sun.nio.ch.IOUtil.initPipe(Native Method)
        at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
        at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
        at java.nio.channels.Selector.open(Selector.java:209)
        at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
        at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
        at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
        at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
        at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
        at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
        at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:416)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:507)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1561)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
        at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:614)
        at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:448)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
        at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
        at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
        at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
        at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
        at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
        at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
        at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_29j3j5brtf(TestBalancer.java:327)
        at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0(TestBalancer.java:324)


FAILED:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
        at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
        at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
        at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancerDefaultConstructor(TestBalancer.java:279)
        at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancerDefaultConstructor(TestBalancer.java:376)
        at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_2g13gq9rto(TestBalancer.java:344)
        at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2(TestBalancer.java:341)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.testEnd2End

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
        at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
        at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
        at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
        at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
        at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
        at org.apache.hadoop.hdfs.server.balancer.TestBalancer.integrationTest(TestBalancer.java:319)
        at org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.__CLR3_0_2wspf0nr5x(TestBlockTokenWithDFS.java:529)
        at org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.testEnd2End(TestBlockTokenWithDFS.java:526)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 21ca7add645d906726cbf9a78dbae90f but expecting 8e2f096b8ca2253e32f060fd86240bb9

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 21ca7add645d906726cbf9a78dbae90f but expecting 8e2f096b8ca2253e32f060fd86240bb9
        at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
        at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4te6(TestStorageRestore.java:316)
        at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)


