See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/551/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 646141 lines...]
    [junit] 2011-01-13 13:38:19,127 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-01-13 13:38:19,128 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-13 13:38:19,128 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(786)) - Shutting down DataNode 0
    [junit] 2011-01-13 13:38:19,230 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 39737
    [junit] 2011-01-13 13:38:19,230 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 39737: exiting
    [junit] 2011-01-13 13:38:19,230 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-13 13:38:19,230 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:45424, storageID=DS-1888505084-127.0.1.1-45424-1294925888090, infoPort=47456, ipcPort=39737):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit]     at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit]     at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit]     at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit]     at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-13 13:38:19,230 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 39737
    [junit] 2011-01-13 13:38:19,230 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-01-13 13:38:19,332 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-01-13 13:38:19,332 INFO  datanode.DataNode (DataNode.java:run(1459)) - DatanodeRegistration(127.0.0.1:45424, storageID=DS-1888505084-127.0.1.1-45424-1294925888090, infoPort=47456, ipcPort=39737):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-01-13 13:38:19,332 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 39737
    [junit] 2011-01-13 13:38:19,332 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-13 13:38:19,332 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-01-13 13:38:19,333 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-01-13 13:38:19,333 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-13 13:38:19,435 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2828)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-13 13:38:19,436 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 2Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 2 
    [junit] 2011-01-13 13:38:19,435 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-13 13:38:19,437 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 37234
    [junit] 2011-01-13 13:38:19,437 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 37234: exiting
    [junit] 2011-01-13 13:38:19,437 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 37234: exiting
    [junit] 2011-01-13 13:38:19,438 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 37234: exiting
    [junit] 2011-01-13 13:38:19,437 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 37234: exiting
    [junit] 2011-01-13 13:38:19,437 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 37234: exiting
    [junit] 2011-01-13 13:38:19,438 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 37234: exiting
    [junit] 2011-01-13 13:38:19,438 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 37234: exiting
    [junit] 2011-01-13 13:38:19,438 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 37234: exiting
    [junit] 2011-01-13 13:38:19,438 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 37234: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.941 sec
    [junit] 2011-01-13 13:38:19,438 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 37234: exiting
    [junit] 2011-01-13 13:38:19,438 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 37234
    [junit] 2011-01-13 13:38:19,438 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:735: Tests failed!

Total time: 101 minutes 48 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
6 tests failed.
REGRESSION:  org.apache.hadoop.cli.TestHDFSCLI.testAll

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testErrorReplicas

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files  at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)  at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)  at org.apache.hadoop.util.Shell.run(Shell.java:188)  at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:381)  at org.apache.hadoop.util.Shell.execCommand(Shell.java:467)  at org.apache.hadoop.util.Shell.execCommand(Shell.java:450)  at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)  at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)  at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)  at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)  at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)  at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)  at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1592)  at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)  at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)  at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)  at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)  at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)  at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)  at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)  at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)  at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)  at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)  at junit.framework.TestCase.runBare(TestCase.java:132)  at junit.framework.TestResult$1.protect(TestResult.java:110)  at junit.framework.TestResult.runProtected(TestResult.java:128)  at junit.framework.TestResult.run(TestResult.java:113)  at junit.framework.TestCase.run(TestCase.java:124)  at junit.framework.TestSuite.runTest(TestSuite.java:232)  at junit.framework.TestSuite.run(TestSuite.java:227)  at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:83)  at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)  at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)  at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)  at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768) Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files  at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)  at java.lang.ProcessImpl.start(ProcessImpl.java:65)  at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)  ... 34 more 

Stack Trace:
java.lang.RuntimeException: Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
        at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
        at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)
        at org.apache.hadoop.util.Shell.run(Shell.java:188)
        at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:381)
        at org.apache.hadoop.util.Shell.execCommand(Shell.java:467)
        at org.apache.hadoop.util.Shell.execCommand(Shell.java:450)
        at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)
        at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)
        at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)
        at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
        at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
        at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1592)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
        at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
        at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
        at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
        at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
        at java.lang.ProcessImpl.start(ProcessImpl.java:65)
        at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)

        at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:516)
        at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
        at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
        at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1592)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
        at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
        at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
        at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
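
Note: error=24 is EMFILE, i.e. the test JVM ran out of file descriptors, so even forking "/bin/ls" to read permissions failed. That points at streams or sockets leaking across test cases (or a too-low ulimit -n on the build slave) rather than at the code path in the trace. A minimal, hypothetical diagnostic for a Linux slave like this one, counting the JVM's open descriptors via /proc/self/fd (class name and usage are illustrative, not part of Hadoop):

    import java.io.File;

    /** Hypothetical helper: approximate this JVM's open-fd count on Linux. */
    public final class FdUsage {
        /** Entries in /proc/self/fd, or -1 where that pseudo-filesystem is unavailable. */
        public static int openFileDescriptors() {
            File[] fds = new File("/proc/self/fd").listFiles();
            return fds == null ? -1 : fds.length;
        }

        public static void main(String[] args) {
            // Logged in setUp/tearDown, a count that climbs monotonically across
            // test cases will eventually trip EMFILE (error=24) as seen above.
            System.out.println("open fds: " + openFileDescriptors());
        }
    }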


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
        at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
        at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
        at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
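
Note: "The directory is already locked" most likely means the previous TestFileConcurrentReader case never reached tearDown (it died in setUp from fd exhaustion), so its MiniDFSCluster still held the lock on name1 when this case tried to format it. Storage$StorageDirectory.lock() takes a java.nio file lock on a lock file inside each storage directory; a rough sketch of that scheme (file name and messages assumed, not copied from Storage.java):

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileLock;

    /** Sketch of per-directory locking in the style of Storage$StorageDirectory. */
    public final class DirLock {
        /** Acquire an exclusive lock file so two daemons cannot share one storage dir. */
        public static FileLock lock(File storageDir) throws IOException {
            File lockFile = new File(storageDir, "in_use.lock"); // name assumed
            RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
            // Per the JDK contract, tryLock() returns null when another process
            // holds the lock and throws OverlappingFileLockException when this
            // same JVM already holds it (the situation a leaked cluster produces).
            FileLock lock = raf.getChannel().tryLock();
            if (lock == null) {
                raf.close();
                throw new IOException("Cannot lock storage " + storageDir
                        + ". The directory is already locked.");
            }
            return lock; // caller must release() it during shutdown
        }
    }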


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
        at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1546)
        at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1411)
        at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1357)
        at org.apache.hadoop.conf.Configuration.set(Configuration.java:600)
        at org.apache.hadoop.conf.Configuration.setBoolean(Configuration.java:804)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:313)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tqy9(TestFileConcurrentReader.java:275)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
        at java.io.FileInputStream.open(Native Method)
        at java.io.FileInputStream.<init>(FileInputStream.java:106)
        at java.io.FileInputStream.<init>(FileInputStream.java:66)
        at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
        at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
        at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
        at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
        at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
        at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
        at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
        at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
        at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
        at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
        at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1460)
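
Note: this appears to share a root cause with the "/bin/ls" failure above: once the process has exhausted its descriptors, even opening hdfs-default.xml from the classpath fails, so the FileNotFoundException is a symptom of fd exhaustion rather than a missing file. The fd-counting sketch above applies here as well.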


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 1a1e36ce754de9bd267aa770afefee09 but expecting cb0fe2de12a839f8f0f484037fc59364

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 1a1e36ce754de9bd267aa770afefee09 but expecting cb0fe2de12a839f8f0f484037fc59364
        at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
        at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tkw(TestStorageRestore.java:316)
        at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)
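
Note: during the checkpoint the fsimage is re-verified against the MD5 digest recorded for it, and this failure says the recomputed digest did not match, i.e. the image bytes read back differ from what was written or transferred (a truncated or concurrently rewritten file, or a transfer race in the test). The verification step amounts to streaming the file through an MD5 digest; a small sketch (class and method names illustrative, the real check lives in FSImage.loadFSImage):

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.security.DigestInputStream;
    import java.security.MessageDigest;

    /** Sketch of the image-digest check: recompute MD5 and compare to the expected value. */
    public final class ImageDigest {
        public static void verify(String imageFile, String expectedHex) throws Exception {
            MessageDigest md5 = MessageDigest.getInstance("MD5");
            try (InputStream in = new DigestInputStream(new FileInputStream(imageFile), md5)) {
                byte[] buf = new byte[64 * 1024];
                while (in.read(buf) >= 0) { /* each read updates the digest */ }
            }
            StringBuilder hex = new StringBuilder();
            for (byte b : md5.digest()) hex.append(String.format("%02x", b));
            if (!hex.toString().equals(expectedHex)) {
                throw new IOException("Image file " + imageFile
                        + " is corrupt with MD5 checksum of " + hex
                        + " but expecting " + expectedHex);
            }
        }
    }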


