See https://builds.apache.org/job/Hadoop-Hdfs-trunk/2496/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 6753 lines...]
[INFO] 
[INFO] --- maven-antrun-plugin:1.7:run (create-testdirs) @ hadoop-hdfs-project ---
[INFO] Executing tasks

main:
    [mkdir] Created dir: /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/target/test-dir
[INFO] Executed tasks
[INFO]
[INFO] --- maven-source-plugin:2.3:jar-no-fork (hadoop-java-sources) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-source-plugin:2.3:test-jar-no-fork (hadoop-java-sources) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-enforcer-plugin:1.3.1:enforce (dist-enforce) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-site-plugin:3.4:attach-descriptor (attach-descriptor) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-javadoc-plugin:2.8.1:jar (module-javadocs) @ hadoop-hdfs-project ---
[INFO] Skipping javadoc generation
[INFO]
[INFO] --- maven-enforcer-plugin:1.3.1:enforce (depcheck) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-checkstyle-plugin:2.15:checkstyle (default-cli) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- findbugs-maven-plugin:3.0.0:findbugs (default-cli) @ hadoop-hdfs-project ---
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop HDFS Client ......................... SUCCESS [04:32 min]
[INFO] Apache Hadoop HDFS ................................ FAILURE [  01:27 h]
[INFO] Apache Hadoop HDFS Native Client .................. SKIPPED
[INFO] Apache Hadoop HttpFS .............................. SKIPPED
[INFO] Apache Hadoop HDFS BookKeeper Journal ............. SKIPPED
[INFO] Apache Hadoop HDFS-NFS ............................ SKIPPED
[INFO] Apache Hadoop HDFS Project ........................ SUCCESS [  0.099 s]
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:32 h
[INFO] Finished at: 2015-10-31T06:06:19+00:00
[INFO] Final Memory: 82M/1055M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-hdfs: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/surefire/surefirebooter7974953538055697117.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/surefire/surefire8900298493226276334tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/surefire/surefire_98181936500680930824tmp
[ERROR] -> [Help 1]
[ERROR]
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR]
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR]
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-hdfs
Build step 'Execute shell' marked build as failure
Archiving artifacts
Recording test results
Sending e-mails to: hdfs-dev@hadoop.apache.org
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any
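
An aside on the surefire error above: "The forked VM terminated without
properly saying goodbye" means the forked test JVM died before reporting
back to Maven, and the message itself suggests System.exit as one culprit.
A hypothetical reproduction (ForkKillerTest is invented for illustration,
not from this build) would be:

    import org.junit.Test;

    public class ForkKillerTest {
        @Test
        public void exitsTheForkedVm() {
            // Surefire runs tests in a forked JVM; System.exit() ends that
            // JVM before the goodbye handshake, so Maven reports "The
            // forked VM terminated without properly saying goodbye."
            System.exit(1);
        }
    }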



###################################################################################
############################## FAILED TESTS (if any) ##############################
66 tests failed.
FAILED:  org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure000.test5

Error Message:
failed, dn=0, length=65536java.io.IOException: Failed at i=4607
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.write(TestDFSStripedOutputStreamWithFailure.java:402)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:378)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:281)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.run(TestDFSStripedOutputStreamWithFailure.java:486)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.test5(TestDFSStripedOutputStreamWithFailure.java:494)
 at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
 at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
 at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
 at java.lang.reflect.Method.invoke(Method.java:606)
 at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
 at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
 at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
 at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
 at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.io.IOException: Failed to get 6 nodes from namenode: blockGroupSize= 9, blocks.length= 5
 at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:441)
 at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:482)
 at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:217)
 at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:164)
 at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:145)
 at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:79)
 at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:48)
 at java.io.DataOutputStream.write(DataOutputStream.java:88)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.write(TestDFSStripedOutputStreamWithFailure.java:400)
 ... 13 more


Stack Trace:
java.lang.AssertionError: failed, dn=0, length=65536java.io.IOException: Failed at i=4607
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.write(TestDFSStripedOutputStreamWithFailure.java:402)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:378)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:281)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.run(TestDFSStripedOutputStreamWithFailure.java:486)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.test5(TestDFSStripedOutputStreamWithFailure.java:494)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.io.IOException: Failed to get 6 nodes from namenode: blockGroupSize= 9, blocks.length= 5
        at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:441)
        at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:482)
        at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:217)
        at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:164)
        at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:145)
        at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:79)
        at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:48)
        at java.io.DataOutputStream.write(DataOutputStream.java:88)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.write(TestDFSStripedOutputStreamWithFailure.java:400)
        ... 13 more

        at org.junit.Assert.fail(Assert.java:88)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:286)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.run(TestDFSStripedOutputStreamWithFailure.java:486)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.test5(TestDFSStripedOutputStreamWithFailure.java:494)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
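
A note on the root cause above, under one loudly labeled assumption: the
default RS-6-3 striping schema (6 data + 3 parity blocks per block group),
which the log itself never names. The sketch below just makes the
arithmetic behind "blockGroupSize= 9" explicit.

    // Back-of-the-envelope sketch under the assumed RS-6-3 schema.
    public class StripeNodeMath {
        public static void main(String[] args) {
            int dataBlocks = 6;    // data blocks per block group (assumed)
            int parityBlocks = 3;  // parity blocks per block group (assumed)
            int blockGroupSize = dataBlocks + parityBlocks;
            // Matches "blockGroupSize= 9" in the exception: the striped
            // writer wants a datanode for each of the 6 data blocks, but
            // the namenode apparently supplied only 5 usable locations
            // ("blocks.length= 5"), hence "Failed to get 6 nodes".
            System.out.println(blockGroupSize + " slots, "
                    + dataBlocks + " data nodes required");
        }
    }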


FAILED:  org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure040.test0

Error Message:
failed, dn=0, length=851967java.lang.AssertionError: expected:<1001> but was:<1002>
 at org.junit.Assert.fail(Assert.java:88)
 at org.junit.Assert.failNotEquals(Assert.java:743)
 at org.junit.Assert.assertEquals(Assert.java:118)
 at org.junit.Assert.assertEquals(Assert.java:555)
 at org.junit.Assert.assertEquals(Assert.java:542)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:362)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:281)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.run(TestDFSStripedOutputStreamWithFailure.java:486)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.test0(TestDFSStripedOutputStreamWithFailure.java:489)
 at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
 at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
 at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
 at java.lang.reflect.Method.invoke(Method.java:606)
 at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
 at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
 at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
 at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
 at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


Stack Trace:
java.lang.AssertionError: failed, dn=0, length=851967java.lang.AssertionError: expected:<1001> but was:<1002>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.junit.Assert.assertEquals(Assert.java:542)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:362)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:281)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.run(TestDFSStripedOutputStreamWithFailure.java:486)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.test0(TestDFSStripedOutputStreamWithFailure.java:489)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)

        at org.junit.Assert.fail(Assert.java:88)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:286)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.run(TestDFSStripedOutputStreamWithFailure.java:486)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.test0(TestDFSStripedOutputStreamWithFailure.java:489)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure040.test3

Error Message:
failed, dn=0, length=917503java.lang.AssertionError: expected:<1001> but was:<1002>
 at org.junit.Assert.fail(Assert.java:88)
 at org.junit.Assert.failNotEquals(Assert.java:743)
 at org.junit.Assert.assertEquals(Assert.java:118)
 at org.junit.Assert.assertEquals(Assert.java:555)
 at org.junit.Assert.assertEquals(Assert.java:542)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:362)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:281)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.run(TestDFSStripedOutputStreamWithFailure.java:486)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.test3(TestDFSStripedOutputStreamWithFailure.java:492)
 at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
 at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
 at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
 at java.lang.reflect.Method.invoke(Method.java:606)
 at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
 at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
 at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
 at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
 at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


Stack Trace:
java.lang.AssertionError: failed, dn=0, length=917503java.lang.AssertionError: expected:<1001> but was:<1002>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.junit.Assert.assertEquals(Assert.java:542)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:362)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:281)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.run(TestDFSStripedOutputStreamWithFailure.java:486)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.test3(TestDFSStripedOutputStreamWithFailure.java:492)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)

        at org.junit.Assert.fail(Assert.java:88)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:286)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.run(TestDFSStripedOutputStreamWithFailure.java:486)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.test3(TestDFSStripedOutputStreamWithFailure.java:492)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure050.test3

Error Message:
failed, dn=0, length=1114112java.lang.AssertionError: expected:<1001> but was:<1002>
 at org.junit.Assert.fail(Assert.java:88)
 at org.junit.Assert.failNotEquals(Assert.java:743)
 at org.junit.Assert.assertEquals(Assert.java:118)
 at org.junit.Assert.assertEquals(Assert.java:555)
 at org.junit.Assert.assertEquals(Assert.java:542)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:362)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:281)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.run(TestDFSStripedOutputStreamWithFailure.java:486)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.test3(TestDFSStripedOutputStreamWithFailure.java:492)
 at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
 at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
 at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
 at java.lang.reflect.Method.invoke(Method.java:606)
 at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
 at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
 at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
 at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
 at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


Stack Trace:
java.lang.AssertionError: failed, dn=0, length=1114112java.lang.AssertionError: expected:<1001> but was:<1002>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.junit.Assert.assertEquals(Assert.java:542)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:362)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:281)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.run(TestDFSStripedOutputStreamWithFailure.java:486)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.test3(TestDFSStripedOutputStreamWithFailure.java:492)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)

        at org.junit.Assert.fail(Assert.java:88)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:286)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.run(TestDFSStripedOutputStreamWithFailure.java:486)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.test3(TestDFSStripedOutputStreamWithFailure.java:492)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure140.test4

Error Message:
failed, dn=0, length=3080193java.lang.AssertionError: expected:<1001> but was:<1002>
 at org.junit.Assert.fail(Assert.java:88)
 at org.junit.Assert.failNotEquals(Assert.java:743)
 at org.junit.Assert.assertEquals(Assert.java:118)
 at org.junit.Assert.assertEquals(Assert.java:555)
 at org.junit.Assert.assertEquals(Assert.java:542)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:362)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:281)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.run(TestDFSStripedOutputStreamWithFailure.java:486)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.test4(TestDFSStripedOutputStreamWithFailure.java:493)
 at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
 at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
 at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
 at java.lang.reflect.Method.invoke(Method.java:606)
 at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
 at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
 at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
 at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
 at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


Stack Trace:
java.lang.AssertionError: failed, dn=0, length=3080193java.lang.AssertionError: expected:<1001> but was:<1002>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.junit.Assert.assertEquals(Assert.java:542)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:362)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:281)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.run(TestDFSStripedOutputStreamWithFailure.java:486)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.test4(TestDFSStripedOutputStreamWithFailure.java:493)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)

        at org.junit.Assert.fail(Assert.java:88)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:286)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.run(TestDFSStripedOutputStreamWithFailure.java:486)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.test4(TestDFSStripedOutputStreamWithFailure.java:493)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure190.test6

Error Message:
failed, dn=0, length=4259839java.lang.AssertionError: expected:<1003> but was:<1004>
 at org.junit.Assert.fail(Assert.java:88)
 at org.junit.Assert.failNotEquals(Assert.java:743)
 at org.junit.Assert.assertEquals(Assert.java:118)
 at org.junit.Assert.assertEquals(Assert.java:555)
 at org.junit.Assert.assertEquals(Assert.java:542)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:362)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:281)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.run(TestDFSStripedOutputStreamWithFailure.java:486)
 at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.test6(TestDFSStripedOutputStreamWithFailure.java:495)
 at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
 at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
 at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
 at java.lang.reflect.Method.invoke(Method.java:606)
 at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
 at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
 at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
 at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
 at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


Stack Trace:
java.lang.AssertionError: failed, dn=0, length=4259839java.lang.AssertionError: expected:<1003> but was:<1004>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.junit.Assert.assertEquals(Assert.java:542)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:362)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:281)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.run(TestDFSStripedOutputStreamWithFailure.java:486)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.test6(TestDFSStripedOutputStreamWithFailure.java:495)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)

        at org.junit.Assert.fail(Assert.java:88)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.runTest(TestDFSStripedOutputStreamWithFailure.java:286)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.run(TestDFSStripedOutputStreamWithFailure.java:486)
        at org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure$TestBase.test6(TestDFSStripedOutputStreamWithFailure.java:495)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.hdfs.TestEncryptedTransfer.testEncryptedAppend[0]

Error Message:
Failed to replace a bad datanode on the existing pipeline due to no more good datanodes being available to try. (Nodes: current=[DatanodeInfoWithStorage[127.0.0.1:49599,DS-d77f8ad5-87f2-440a-862f-c6eca75525bd,DISK], DatanodeInfoWithStorage[127.0.0.1:56984,DS-7d266787-0028-4b50-960a-eebbf2689004,DISK]], original=[DatanodeInfoWithStorage[127.0.0.1:49599,DS-d77f8ad5-87f2-440a-862f-c6eca75525bd,DISK], DatanodeInfoWithStorage[127.0.0.1:56984,DS-7d266787-0028-4b50-960a-eebbf2689004,DISK]]). The current failed datanode replacement policy is DEFAULT, and a client may configure this via 'dfs.client.block.write.replace-datanode-on-failure.policy' in its configuration.

Stack Trace:
java.io.IOException: Failed to replace a bad datanode on the existing pipeline due to no more good datanodes being available to try. (Nodes: current=[DatanodeInfoWithStorage[127.0.0.1:49599,DS-d77f8ad5-87f2-440a-862f-c6eca75525bd,DISK], DatanodeInfoWithStorage[127.0.0.1:56984,DS-7d266787-0028-4b50-960a-eebbf2689004,DISK]], original=[DatanodeInfoWithStorage[127.0.0.1:49599,DS-d77f8ad5-87f2-440a-862f-c6eca75525bd,DISK], DatanodeInfoWithStorage[127.0.0.1:56984,DS-7d266787-0028-4b50-960a-eebbf2689004,DISK]]). The current failed datanode replacement policy is DEFAULT, and a client may configure this via 'dfs.client.block.write.replace-datanode-on-failure.policy' in its configuration.
        at org.apache.hadoop.hdfs.DataStreamer.findNewDatanode(DataStreamer.java:1165)
        at org.apache.hadoop.hdfs.DataStreamer.addDatanode2ExistingPipeline(DataStreamer.java:1235)
        at org.apache.hadoop.hdfs.DataStreamer.handleDatanodeReplacement(DataStreamer.java:1426)
        at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1341)
        at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1324)
        at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:598)
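
The message above names the client-side knob involved. As a minimal sketch
of flipping it, assuming the stock org.apache.hadoop.conf.Configuration API
(the property name is quoted verbatim in the error; "NEVER" as a legal
alternative to the DEFAULT policy is an assumption here):

    import org.apache.hadoop.conf.Configuration;

    public class ReplaceDatanodePolicySketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // Skip datanode replacement on pipeline failure ("NEVER" is an
            // assumed value; the log itself only confirms DEFAULT exists).
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy",
                    "NEVER");
            System.out.println(conf.get(
                    "dfs.client.block.write.replace-datanode-on-failure.policy"));
        }
    }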


FAILED:  org.apache.hadoop.hdfs.TestEncryptedTransfer.testEncryptedAppend[1]

Error Message:
Failed to replace a bad datanode on the existing pipeline due to no more good datanodes being available to try. (Nodes: current=[DatanodeInfoWithStorage[127.0.0.1:40694,DS-e4f42ce5-6b73-4c62-a175-47072c909eed,DISK], DatanodeInfoWithStorage[127.0.0.1:42882,DS-5f15e449-172e-4f0c-849b-7e23ac5593cf,DISK]], original=[DatanodeInfoWithStorage[127.0.0.1:40694,DS-e4f42ce5-6b73-4c62-a175-47072c909eed,DISK], DatanodeInfoWithStorage[127.0.0.1:42882,DS-5f15e449-172e-4f0c-849b-7e23ac5593cf,DISK]]). The current failed datanode replacement policy is DEFAULT, and a client may configure this via 'dfs.client.block.write.replace-datanode-on-failure.policy' in its configuration.

Stack Trace:
java.io.IOException: Failed to replace a bad datanode on the existing pipeline due to no more good datanodes being available to try. (Nodes: current=[DatanodeInfoWithStorage[127.0.0.1:40694,DS-e4f42ce5-6b73-4c62-a175-47072c909eed,DISK], DatanodeInfoWithStorage[127.0.0.1:42882,DS-5f15e449-172e-4f0c-849b-7e23ac5593cf,DISK]], original=[DatanodeInfoWithStorage[127.0.0.1:40694,DS-e4f42ce5-6b73-4c62-a175-47072c909eed,DISK], DatanodeInfoWithStorage[127.0.0.1:42882,DS-5f15e449-172e-4f0c-849b-7e23ac5593cf,DISK]]). The current failed datanode replacement policy is DEFAULT, and a client may configure this via 'dfs.client.block.write.replace-datanode-on-failure.policy' in its configuration.
        at org.apache.hadoop.hdfs.DataStreamer.findNewDatanode(DataStreamer.java:1165)
        at org.apache.hadoop.hdfs.DataStreamer.addDatanode2ExistingPipeline(DataStreamer.java:1235)
        at org.apache.hadoop.hdfs.DataStreamer.handleDatanodeReplacement(DataStreamer.java:1426)
        at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1341)
        at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1324)
        at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:598)


FAILED:  org.apache.hadoop.hdfs.TestEncryptedTransfer.testEncryptedAppendRequiringBlockTransfer[1]

Error Message:
expected:<3> but was:<2>

Stack Trace:
java.lang.AssertionError: expected:<3> but was:<2>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.junit.Assert.assertEquals(Assert.java:542)
        at org.apache.hadoop.hdfs.TestEncryptedTransfer.testEncryptedAppendRequiringBlockTransfer(TestEncryptedTransfer.java:583)


FAILED:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testTwoReplicaShouldNotInSameDN

Error Message:
Timed out waiting for /bar/foo to reach 2 replicas

Stack Trace:
java.util.concurrent.TimeoutException: Timed out waiting for /bar/foo to reach 2 replicas
        at org.apache.hadoop.hdfs.DFSTestUtil.waitReplication(DFSTestUtil.java:768)
        at org.apache.hadoop.hdfs.server.balancer.TestBalancer.createFile(TestBalancer.java:176)
        at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testTwoReplicaShouldNotInSameDN(TestBalancer.java:1500)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestBlockTokenWithDFS.testRead

Error Message:
End of File Exception between local host is: "asf909.gq1.ygridcore.net/67.195.81.153"; destination host is: "localhost":49098; : java.io.EOFException; For more details see:  http://wiki.apache.org/hadoop/EOFException

Stack Trace:
java.io.EOFException: End of File Exception between local host is: "asf909.gq1.ygridcore.net/67.195.81.153"; destination host is: "localhost":49098; : java.io.EOFException; For more details see:  http://wiki.apache.org/hadoop/EOFException
        at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
        at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
        at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
        at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
        at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
        at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:765)
        at org.apache.hadoop.ipc.Client.call(Client.java:1452)
        at org.apache.hadoop.ipc.Client.call(Client.java:1385)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
        at com.sun.proxy.$Proxy19.getBlockLocations(Unknown Source)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getBlockLocations(ClientNamenodeProtocolTranslatorPB.java:253)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
        at com.sun.proxy.$Proxy20.getBlockLocations(Unknown Source)
        at org.apache.hadoop.hdfs.DFSClient.callGetBlockLocations(DFSClient.java:829)
        at org.apache.hadoop.hdfs.DFSClient.getLocatedBlocks(DFSClient.java:818)
        at org.apache.hadoop.hdfs.DFSClient.getLocatedBlocks(DFSClient.java:807)
        at org.apache.hadoop.hdfs.DFSInputStream.fetchLocatedBlocksAndGetLastBlockLength(DFSInputStream.java:314)
        at org.apache.hadoop.hdfs.DFSInputStream.openInfo(DFSInputStream.java:275)
        at org.apache.hadoop.hdfs.DFSInputStream.chooseDataNode(DFSInputStream.java:1017)
        at org.apache.hadoop.hdfs.DFSInputStream.blockSeekTo(DFSInputStream.java:612)
        at org.apache.hadoop.hdfs.DFSInputStream.seekToNewSource(DFSInputStream.java:1577)
        at org.apache.hadoop.fs.FSDataInputStream.seekToNewSource(FSDataInputStream.java:127)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestBlockTokenWithDFS.doTestRead(TestBlockTokenWithDFS.java:519)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestBlockTokenWithDFS.testRead(TestBlockTokenWithDFS.java:355)
Caused by: java.io.EOFException: null
        at java.io.DataInputStream.readInt(DataInputStream.java:392)
        at org.apache.hadoop.ipc.Client$Connection.receiveRpcResponse(Client.java:1110)
        at org.apache.hadoop.ipc.Client$Connection.run(Client.java:1005)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestRBWBlockInvalidation.testBlockInvalidationWhenRBWReplicaMissedInDN

Error Message:
All datanodes [DatanodeInfoWithStorage[127.0.0.1:46144,DS-05efbc91-6759-4eb7-8c20-e5d34d44d375,DISK]] are bad. Aborting...

Stack Trace:
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46144,DS-05efbc91-6759-4eb7-8c20-e5d34d44d375,DISK]] are bad. Aborting...
        at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1393)
        at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1337)
        at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1324)
        at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1122)
        at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:544)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testChooseTarget3[0]

Error Message:
expected:<[DISK]s2:NORMAL:2.2.2.2:50010> but was:<[DISK]s3:NORMAL:3.3.3.3:50010>

Stack Trace:
java.lang.AssertionError: expected:<[DISK]s2:NORMAL:2.2.2.2:50010> but was:<[DISK]s3:NORMAL:3.3.3.3:50010>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:144)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testChooseTarget3(TestReplicationPolicy.java:320)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testChooseTarget5[1]

Error Message:
null

Stack Trace:
java.lang.AssertionError: null
        at org.junit.Assert.fail(Assert.java:86)
        at org.junit.Assert.assertTrue(Assert.java:41)
        at org.junit.Assert.assertTrue(Assert.java:52)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testChooseTarget5(TestReplicationPolicy.java:413)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyWithNodeGroup.testRereplicateOnBoundaryTopology

Error Message:
0

Stack Trace:
java.lang.ArrayIndexOutOfBoundsException: 0
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyWithNodeGroup.testRereplicateOnBoundaryTopology(TestReplicationPolicyWithNodeGroup.java:631)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyWithNodeGroup.testChooseTargetForLocalStorage

Error Message:
null

Stack Trace:
java.lang.AssertionError: null
        at org.junit.Assert.fail(Assert.java:86)
        at org.junit.Assert.assertTrue(Assert.java:41)
        at org.junit.Assert.assertTrue(Assert.java:52)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyWithNodeGroup.testChooseTargetForLocalStorage(TestReplicationPolicyWithNodeGroup.java:415)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyWithNodeGroup.testChooseTarget1

Error Message:
null

Stack Trace:
java.lang.AssertionError: null
        at org.junit.Assert.fail(Assert.java:86)
        at org.junit.Assert.assertTrue(Assert.java:41)
        at org.junit.Assert.assertFalse(Assert.java:64)
        at org.junit.Assert.assertFalse(Assert.java:74)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyWithNodeGroup.testChooseTarget1(TestReplicationPolicyWithNodeGroup.java:210)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyWithNodeGroup.testChooseTarget5

Error Message:
null

Stack Trace:
java.lang.AssertionError: null
        at org.junit.Assert.fail(Assert.java:86)
        at org.junit.Assert.assertTrue(Assert.java:41)
        at org.junit.Assert.assertTrue(Assert.java:52)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyWithNodeGroup.testChooseTarget5(TestReplicationPolicyWithNodeGroup.java:394)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyWithUpgradeDomain.testChooseTargetWithExcludeNodes

Error Message:
expected:<3> but was:<2>

Stack Trace:
java.lang.AssertionError: expected:<3> but was:<2>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.junit.Assert.assertEquals(Assert.java:542)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyWithUpgradeDomain.testChooseTargetWithExcludeNodes(TestReplicationPolicyWithUpgradeDomain.java:148)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestDataNodeMetrics.testTimeoutMetric

Error Message:
Bad value for metric DatanodeNetworkErrors expected:<1> but was:<0>

Stack Trace:
java.lang.AssertionError: Bad value for metric DatanodeNetworkErrors expected:<1> but was:<0>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.apache.hadoop.test.MetricsAsserts.assertCounter(MetricsAsserts.java:228)
        at org.apache.hadoop.hdfs.server.datanode.TestDataNodeMetrics.testTimeoutMetric(TestDataNodeMetrics.java:231)
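
For context, the failing assertion comes from Hadoop's MetricsAsserts test
helper; assertCounter is the frame directly above the test in the trace. A
minimal sketch of that pattern (the two helper methods appear in the stack
trace; the surrounding method and the metricsSourceName argument are
invented for illustration):

    import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
    import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

    import org.apache.hadoop.metrics2.MetricsRecordBuilder;

    public class TimeoutMetricSketch {
        static void checkNetworkErrors(String metricsSourceName) {
            // Snapshot the named metrics source, then compare the counter
            // from the error message against the expected value (1).
            MetricsRecordBuilder rb = getMetrics(metricsSourceName);
            assertCounter("DatanodeNetworkErrors", 1L, rb);
        }
    }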


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestDataNodeVolumeFailure.testVolumeFailure

Error Message:
Timed out waiting for /test1.txt to reach 2 replicas

Stack Trace:
java.util.concurrent.TimeoutException: Timed out waiting for /test1.txt to reach 2 replicas
        at org.apache.hadoop.hdfs.DFSTestUtil.waitReplication(DFSTestUtil.java:768)
        at org.apache.hadoop.hdfs.server.datanode.TestDataNodeVolumeFailure.testVolumeFailure(TestDataNodeVolumeFailure.java:201)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestNNHandlesBlockReportPerStorage.testOneReplicaRbwReportArrivesAfterBlockCompleted

Error Message:
Could not obtain block: BP-1796083477-67.195.81.153-1446269795604:blk_1073741825_1001 file=/testOneReplicaRbwReportArrivesAfterBlockCompleted.dat

Stack Trace:
org.apache.hadoop.hdfs.BlockMissingException: Could not obtain block: BP-1796083477-67.195.81.153-1446269795604:blk_1073741825_1001 file=/testOneReplicaRbwReportArrivesAfterBlockCompleted.dat
        at org.apache.hadoop.hdfs.DFSInputStream.chooseDataNode(DFSInputStream.java:986)
        at org.apache.hadoop.hdfs.DFSInputStream.blockSeekTo(DFSInputStream.java:612)
        at org.apache.hadoop.hdfs.DFSInputStream.readWithStrategy(DFSInputStream.java:889)
        at org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:941)
        at java.io.DataInputStream.read(DataInputStream.java:100)
        at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:87)
        at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:61)
        at org.apache.hadoop.hdfs.DFSTestUtil.readFileBuffer(DFSTestUtil.java:350)
        at org.apache.hadoop.hdfs.DFSTestUtil.readFile(DFSTestUtil.java:340)
        at org.apache.hadoop.hdfs.server.datanode.BlockReportTestBase.testOneReplicaRbwReportArrivesAfterBlockCompleted(BlockReportTestBase.java:643)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestNNHandlesBlockReportPerStorage.blockReport_09

Error Message:
Wrong number of PendingReplication blocks expected:<2> but was:<1>

Stack Trace:
java.lang.AssertionError: Wrong number of PendingReplication blocks expected:<2> but was:<1>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.apache.hadoop.hdfs.server.datanode.BlockReportTestBase.blockReport_09(BlockReportTestBase.java:556)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestReadOnlySharedStorage.testReplicaCounting

Error Message:

Expected: is <3>
     but: was <2>

Stack Trace:
java.lang.AssertionError:
Expected: is <3>
     but: was <2>
        at org.hamcrest.MatcherAssert.assertThat(MatcherAssert.java:20)
        at org.junit.Assert.assertThat(Assert.java:865)
        at org.junit.Assert.assertThat(Assert.java:832)
        at org.apache.hadoop.hdfs.server.datanode.TestReadOnlySharedStorage.waitForLocations(TestReadOnlySharedStorage.java:169)
        at org.apache.hadoop.hdfs.server.datanode.TestReadOnlySharedStorage.testReplicaCounting(TestReadOnlySharedStorage.java:218)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testChooseTarget2[0]

Error Message:
null

Stack Trace:
java.lang.AssertionError: null
        at org.junit.Assert.fail(Assert.java:86)
        at org.junit.Assert.assertTrue(Assert.java:41)
        at org.junit.Assert.assertTrue(Assert.java:52)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testChooseTarget2(TestReplicationPolicy.java:268)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testChooseTargetWithHalfStaleNodes[1]

Error Message:
null

Stack Trace:
java.lang.AssertionError: null
        at org.junit.Assert.fail(Assert.java:86)
        at org.junit.Assert.assertTrue(Assert.java:41)
        at org.junit.Assert.assertFalse(Assert.java:64)
        at org.junit.Assert.assertFalse(Assert.java:74)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testChooseTargetWithHalfStaleNodes(TestReplicationPolicy.java:598)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyWithNodeGroup.testChooseTarget3

Error Message:
expected:<[DISK]s2:NORMAL:2.2.2.2:50010> but was:<[DISK]s3:NORMAL:3.3.3.3:50010>

Stack Trace:
java.lang.AssertionError: expected:<[DISK]s2:NORMAL:2.2.2.2:50010> but was:<[DISK]s3:NORMAL:3.3.3.3:50010>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:144)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyWithNodeGroup.testChooseTarget3(TestReplicationPolicyWithNodeGroup.java:307)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockHasMultipleReplicasOnSameDN.testBlockHasMultipleReplicasOnSameDN

Error Message:

Expected: is <2>
     but: was <1>

Stack Trace:
java.lang.AssertionError:
Expected: is <2>
     but: was <1>
        at org.hamcrest.MatcherAssert.assertThat(MatcherAssert.java:20)
        at org.junit.Assert.assertThat(Assert.java:865)
        at org.junit.Assert.assertThat(Assert.java:832)
        at org.apache.hadoop.hdfs.server.datanode.TestBlockHasMultipleReplicasOnSameDN.testBlockHasMultipleReplicasOnSameDN(TestBlockHasMultipleReplicasOnSameDN.java:137)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestDataNodeVolumeFailureReporting.testSuccessiveVolumeFailures

Error Message:
File /test1 could only be replicated to 0 nodes instead of minReplication (=1).  There are 3 datanode(s) running and 2 node(s) are excluded in this operation.
 at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:1731)
 at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:299)
 at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:2457)
 at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:796)
 at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:500)
 at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2305)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2301)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2299)


Stack Trace:
org.apache.hadoop.ipc.RemoteException: File /test1 could only be replicated to 0 nodes instead of minReplication (=1).  There are 3 datanode(s) running and 2 node(s) are excluded in this operation.
        at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:1731)
        at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:299)
        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:2457)
        at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:796)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:500)
        at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
        at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2305)
        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2301)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:415)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
        at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2299)

        at org.apache.hadoop.ipc.Client.call(Client.java:1448)
        at org.apache.hadoop.ipc.Client.call(Client.java:1385)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
        at com.sun.proxy.$Proxy21.addBlock(Unknown Source)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:404)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
        at com.sun.proxy.$Proxy22.addBlock(Unknown Source)
        at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:911)
        at org.apache.hadoop.hdfs.DataStreamer.locateFollowingBlock(DataStreamer.java:1684)
        at org.apache.hadoop.hdfs.DataStreamer.nextBlockOutputStream(DataStreamer.java:1494)
        at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:594)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestDataNodeVolumeFailureReporting.testDataNodeReconfigureWithVolumeFailures

Error Message:
Bad value for metric VolumeFailures expected:<1> but was:<0>

Stack Trace:
java.lang.AssertionError: Bad value for metric VolumeFailures expected:<1> but was:<0>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.apache.hadoop.test.MetricsAsserts.assertCounter(MetricsAsserts.java:228)
        at org.apache.hadoop.hdfs.server.datanode.TestDataNodeVolumeFailureReporting.checkFailuresAtDataNode(TestDataNodeVolumeFailureReporting.java:480)
        at org.apache.hadoop.hdfs.server.datanode.TestDataNodeVolumeFailureReporting.testDataNodeReconfigureWithVolumeFailures(TestDataNodeVolumeFailureReporting.java:370)
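
The "Bad value for metric" assertions come from org.apache.hadoop.test.MetricsAsserts, which checks a named counter in a metrics snapshot. A hedged sketch of that pattern; the metrics record name is an assumption for illustration, not taken from the test:

    import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
    import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

    import org.apache.hadoop.metrics2.MetricsRecordBuilder;

    public class VolumeFailuresAssertSketch {
      static void checkVolumeFailures() {
        // Snapshot a metrics record; "DataNodeActivity-host-50010" is a
        // made-up record name standing in for the real DataNode record.
        MetricsRecordBuilder rb = getMetrics("DataNodeActivity-host-50010");
        // Produces exactly the failure text above when the counter is
        // still 0: "Bad value for metric VolumeFailures expected:<1> ...".
        assertCounter("VolumeFailures", 1L, rb);
      }
    }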


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestDeleteBlockPool.testDeleteBlockPool

Error Message:
Must not delete if any block files exist unless force is true

Stack Trace:
java.lang.AssertionError: Must not delete if any block files exist unless force is true
        at org.junit.Assert.fail(Assert.java:88)
        at org.apache.hadoop.hdfs.server.datanode.TestDeleteBlockPool.testDeleteBlockPool(TestDeleteBlockPool.java:90)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestDiskError.testShutdown

Error Message:
Timed out waiting for /test.txt0 to reach 2 replicas

Stack Trace:
java.util.concurrent.TimeoutException: Timed out waiting for /test.txt0 to reach 2 replicas
        at org.apache.hadoop.hdfs.DFSTestUtil.waitReplication(DFSTestUtil.java:768)
        at org.apache.hadoop.hdfs.server.datanode.TestDiskError.testShutdown(TestDiskError.java:110)
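
The TimeoutException is DFSTestUtil.waitReplication giving up: it repeatedly asks the NameNode for the file's block locations until every block reports the wanted replica count. A simplified, hedged re-implementation of that polling loop (the real helper lives in the HDFS test jar; names here are mine):

    import java.util.concurrent.TimeoutException;

    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WaitReplicationSketch {
      static void waitReplication(FileSystem fs, Path p, short want,
          long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (true) {
          FileStatus st = fs.getFileStatus(p);
          BlockLocation[] locs = fs.getFileBlockLocations(st, 0, st.getLen());
          boolean done = true;
          for (BlockLocation loc : locs) {
            if (loc.getHosts().length < want) {  // replicas visible so far
              done = false;
              break;
            }
          }
          if (done) {
            return;
          }
          if (System.currentTimeMillis() > deadline) {
            throw new TimeoutException("Timed out waiting for " + p
                + " to reach " + want + " replicas");
          }
          Thread.sleep(500);  // replication is asynchronous; re-poll
        }
      }
    }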


FAILED:  org.apache.hadoop.hdfs.TestDecommission.testRecommission

Error Message:
Unexpected number of replicas from getFileBlockLocations expected:<5> but was:<3>

Stack Trace:
java.lang.AssertionError: Unexpected number of replicas from getFileBlockLocations expected:<5> but was:<3>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.apache.hadoop.hdfs.TestDecommission.testRecommission(TestDecommission.java:645)


FAILED:  org.apache.hadoop.hdfs.TestEncryptedTransfer.testEncryptedAppendRequiringBlockTransfer[0]

Error Message:
Failed to replace a bad datanode on the existing pipeline due to no more good datanodes being available to try. (Nodes: current=[DatanodeInfoWithStorage[127.0.0.1:59919,DS-b0ed18c5-465a-4dfa-a870-5d82add7f0d6,DISK], DatanodeInfoWithStorage[127.0.0.1:50644,DS-f52abddb-1edd-40a6-9f69-a4811cadda45,DISK]], original=[DatanodeInfoWithStorage[127.0.0.1:59919,DS-b0ed18c5-465a-4dfa-a870-5d82add7f0d6,DISK], DatanodeInfoWithStorage[127.0.0.1:50644,DS-f52abddb-1edd-40a6-9f69-a4811cadda45,DISK]]). The current failed datanode replacement policy is DEFAULT, and a client may configure this via 'dfs.client.block.write.replace-datanode-on-failure.policy' in its configuration.

Stack Trace:
java.io.IOException: Failed to replace a bad datanode on the existing pipeline due to no more good datanodes being available to try. (Nodes: current=[DatanodeInfoWithStorage[127.0.0.1:59919,DS-b0ed18c5-465a-4dfa-a870-5d82add7f0d6,DISK], DatanodeInfoWithStorage[127.0.0.1:50644,DS-f52abddb-1edd-40a6-9f69-a4811cadda45,DISK]], original=[DatanodeInfoWithStorage[127.0.0.1:59919,DS-b0ed18c5-465a-4dfa-a870-5d82add7f0d6,DISK], DatanodeInfoWithStorage[127.0.0.1:50644,DS-f52abddb-1edd-40a6-9f69-a4811cadda45,DISK]]). The current failed datanode replacement policy is DEFAULT, and a client may configure this via 'dfs.client.block.write.replace-datanode-on-failure.policy' in its configuration.
        at org.apache.hadoop.hdfs.DataStreamer.findNewDatanode(DataStreamer.java:1165)
        at org.apache.hadoop.hdfs.DataStreamer.addDatanode2ExistingPipeline(DataStreamer.java:1235)
        at org.apache.hadoop.hdfs.DataStreamer.handleDatanodeReplacement(DataStreamer.java:1426)
        at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1341)
        at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1324)
        at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:598)
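
The message names the client knob directly. A minimal sketch of the configuration it points at; the .enable and .best-effort keys are the companion HDFS client settings as I understand them (treat them as assumptions), and the values shown are the documented choices, not tuning advice:

    import org.apache.hadoop.conf.Configuration;

    public class ReplaceDatanodePolicySketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Master switch for replacing a bad datanode during pipeline recovery.
        conf.setBoolean(
            "dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT replaces only for larger/riskier pipelines; ALWAYS and
        // NEVER do what they say. The failing run above used DEFAULT.
        conf.set(
            "dfs.client.block.write.replace-datanode-on-failure.policy",
            "DEFAULT");
        // Best-effort lets the writer continue when no replacement exists.
        conf.setBoolean(
            "dfs.client.block.write.replace-datanode-on-failure.best-effort",
            false);
        System.out.println(conf.get(
            "dfs.client.block.write.replace-datanode-on-failure.policy"));
      }
    }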


FAILED:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancerWithStripedFile

Error Message:
Failed to get 6 nodes from namenode: blockGroupSize= 9, blocks.length= 4

Stack Trace:
java.io.IOException: Failed to get 6 nodes from namenode: blockGroupSize= 9, blocks.length= 4
        at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:441)
        at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:482)
        at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:217)
        at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:164)
        at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:145)
        at org.apache.hadoop.hdfs.DFSStripedOutputStream.closeImpl(DFSStripedOutputStream.java:922)
        at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:753)
        at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:72)
        at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:101)
        at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:427)
        at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:376)
        at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:369)
        at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:362)
        at org.apache.hadoop.hdfs.server.balancer.TestBalancer.doTestBalancerWithStripedFile(TestBalancer.java:1746)
        at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancerWithStripedFile(TestBalancer.java:1713)
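
Reading the numbers in that message: assuming the RS-6-3 erasure-coding schema trunk used by default at the time (an assumption on my part), a striped block group spans 6 data + 3 parity = 9 storage targets, and the writer aborts when the NameNode grants fewer than the 6 data targets. A trivial sketch of the arithmetic:

    public class StripedGroupSketch {
      public static void main(String[] args) {
        final int dataBlocks = 6;    // RS-6-3: data units per block group
        final int parityBlocks = 3;  // RS-6-3: parity units per block group
        final int blockGroupSize = dataBlocks + parityBlocks;  // 9, as logged
        final int granted = 4;       // "blocks.length= 4" in this failure
        System.out.println("need >= " + dataBlocks + " of " + blockGroupSize
            + " targets, got " + granted + " -> IOException");
      }
    }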


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestBlockManager.testTwoOfThreeNodesDecommissioned

Error Message:
Should have three targets expected:<3> but was:<2>

Stack Trace:
java.lang.AssertionError: Should have three targets expected:<3> but was:<2>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestBlockManager.doTestTwoOfThreeNodesDecommissioned(TestBlockManager.java:197)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestBlockManager.testTwoOfThreeNodesDecommissioned(TestBlockManager.java:180)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestBlockManager.testAllNodesHoldingReplicasDecommissioned

Error Message:
Should have three targets expected:<4> but was:<3>

Stack Trace:
java.lang.AssertionError: Should have three targets expected:<4> but was:<3>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestBlockManager.doTestAllNodesHoldingReplicasDecommissioned(TestBlockManager.java:241)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestBlockManager.testAllNodesHoldingReplicasDecommissioned(TestBlockManager.java:224)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestBlockManager.testOneOfTwoRacksDecommissioned

Error Message:
computeBlockRecoveryWork should indicate replication is needed expected:<1> but was:<0>

Stack Trace:
java.lang.AssertionError: computeBlockRecoveryWork should indicate replication is needed expected:<1> but was:<0>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestBlockManager.scheduleSingleReplication(TestBlockManager.java:464)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestBlockManager.doTestOneOfTwoRacksDecommissioned(TestBlockManager.java:321)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestBlockManager.testOneOfTwoRacksDecommissioned(TestBlockManager.java:277)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestBlockManager.testSufficientlyReplBlocksUsesNewRack

Error Message:
computeBlockRecoveryWork should indicate replication is needed expected:<1> but was:<0>

Stack Trace:
java.lang.AssertionError: computeBlockRecoveryWork should indicate replication is needed expected:<1> but was:<0>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestBlockManager.scheduleSingleReplication(TestBlockManager.java:464)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestBlockManager.doTestSufficientlyReplBlocksUsesNewRack(TestBlockManager.java:345)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestBlockManager.testSufficientlyReplBlocksUsesNewRack(TestBlockManager.java:337)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestBlockTokenWithDFS.testAppend

Error Message:
All datanodes [DatanodeInfoWithStorage[127.0.0.1:50269,DS-11b0518e-5018-491a-a22a-5af8a8a904e8,DISK]] are bad. Aborting...

Stack Trace:
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:50269,DS-11b0518e-5018-491a-a22a-5af8a8a904e8,DISK]] are bad. Aborting...
        at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1393)
        at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1337)
        at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1324)
        at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1122)
        at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:544)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestBlockTokenWithDFSStriped.testEnd2End

Error Message:
Failed to get 6 nodes from namenode: blockGroupSize= 9, blocks.length= 5

Stack Trace:
java.io.IOException: Failed to get 6 nodes from namenode: blockGroupSize= 9, blocks.length= 5
        at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:441)
        at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:482)
        at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:217)
        at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:164)
        at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:145)
        at org.apache.hadoop.hdfs.DFSStripedOutputStream.closeImpl(DFSStripedOutputStream.java:922)
        at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:753)
        at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:72)
        at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:101)
        at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:427)
        at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:376)
        at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:369)
        at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:362)
        at org.apache.hadoop.hdfs.server.balancer.TestBalancer.doTestBalancerWithStripedFile(TestBalancer.java:1746)
        at org.apache.hadoop.hdfs.server.balancer.TestBalancer.integrationTestWithStripedFile(TestBalancer.java:1706)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestBlockTokenWithDFSStriped.testEnd2End(TestBlockTokenWithDFSStriped.java:89)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestBlockTokenWithDFSStriped.testRead

Error Message:
Failed to get 6 nodes from namenode: blockGroupSize= 9, blocks.length= 5

Stack Trace:
java.io.IOException: Failed to get 6 nodes from namenode: blockGroupSize= 9, blocks.length= 5
        at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:441)
        at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:482)
        at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:217)
        at org.apache.hadoop.fs.FSOutputSummer.write1(FSOutputSummer.java:125)
        at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:111)
        at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:57)
        at java.io.DataOutputStream.write(DataOutputStream.java:107)
        at java.io.FilterOutputStream.write(FilterOutputStream.java:97)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestBlockTokenWithDFS.createFile(TestBlockTokenWithDFS.java:91)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestBlockTokenWithDFS.doTestRead(TestBlockTokenWithDFS.java:377)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestBlockTokenWithDFSStriped.testRead(TestBlockTokenWithDFSStriped.java:62)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestNodeCount.testNodeCount

Error Message:
Timeout: excess replica count not equal to 2 for block blk_1073741825_1001 after 20000 msec.  Last counts: live = 2, excess = 0, corrupt = 0

Stack Trace:
java.util.concurrent.TimeoutException: Timeout: excess replica count not equal to 2 for block blk_1073741825_1001 after 20000 msec.  Last counts: live = 2, excess = 0, corrupt = 0
        at org.apache.hadoop.hdfs.server.blockmanagement.TestNodeCount.checkTimeout(TestNodeCount.java:152)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestNodeCount.checkTimeout(TestNodeCount.java:146)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestNodeCount.testNodeCount(TestNodeCount.java:130)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestOverReplicatedBlocks.testChooseReplicaToDelete

Error Message:
Timed out waiting for /foo2 to reach 4 replicas

Stack Trace:
java.util.concurrent.TimeoutException: Timed out waiting for /foo2 to reach 4 replicas
        at org.apache.hadoop.hdfs.DFSTestUtil.waitReplication(DFSTestUtil.java:768)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestOverReplicatedBlocks.testChooseReplicaToDelete(TestOverReplicatedBlocks.java:166)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestPendingInvalidateBlock.testPendingDeleteUnknownBlocks

Error Message:
expected:<4> but was:<3>

Stack Trace:
java.lang.AssertionError: expected:<4> but was:<3>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.junit.Assert.assertEquals(Assert.java:542)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestPendingInvalidateBlock.testPendingDeleteUnknownBlocks(TestPendingInvalidateBlock.java:150)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestPendingReplication.testBlockReceived

Error Message:
expected:<4> but was:<3>

Stack Trace:
java.lang.AssertionError: expected:<4> but was:<3>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.junit.Assert.assertEquals(Assert.java:542)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestPendingReplication.testBlockReceived(TestPendingReplication.java:285)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestPendingReplication.testPendingAndInvalidate

Error Message:
expected:<1> but was:<2>

Stack Trace:
java.lang.AssertionError: expected:<1> but was:<2>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.junit.Assert.assertEquals(Assert.java:542)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestPendingReplication.testPendingAndInvalidate(TestPendingReplication.java:391)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestRBWBlockInvalidation.testRWRInvalidation

Error Message:
All datanodes [DatanodeInfoWithStorage[127.0.0.1:46310,DS-0319d99c-003e-4814-a8f6-9856dbf6e7b3,DISK]] are bad. Aborting...

Stack Trace:
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46310,DS-0319d99c-003e-4814-a8f6-9856dbf6e7b3,DISK]] are bad. Aborting...
        at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1393)
        at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1337)
        at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1324)
        at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1122)
        at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:544)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testChooseTargetWithMoreThanAvailableNodesWithStaleness[0]

Error Message:
expected:<3> but was:<4>

Stack Trace:
java.lang.AssertionError: expected:<3> but was:<4>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.junit.Assert.assertEquals(Assert.java:542)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testChooseTargetWithMoreThanAvailableNodes(TestReplicationPolicy.java:506)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testChooseTargetWithMoreThanAvailableNodesWithStaleness(TestReplicationPolicy.java:478)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testChooseTargetWithMoreThanAvailableNodes[0]

Error Message:
expected:<3> but was:<4>

Stack Trace:
java.lang.AssertionError: expected:<3> but was:<4>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.junit.Assert.assertEquals(Assert.java:542)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testChooseTargetWithMoreThanAvailableNodes(TestReplicationPolicy.java:506)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testRereplicate3[0]

Error Message:
null

Stack Trace:
java.lang.AssertionError: null
        at org.junit.Assert.fail(Assert.java:86)
        at org.junit.Assert.assertTrue(Assert.java:41)
        at org.junit.Assert.assertTrue(Assert.java:52)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testRereplicate3(TestReplicationPolicy.java:800)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testChooseTargetWithHalfStaleNodes[0]

Error Message:
null

Stack Trace:
java.lang.AssertionError: null
        at org.junit.Assert.fail(Assert.java:86)
        at org.junit.Assert.assertTrue(Assert.java:41)
        at org.junit.Assert.assertTrue(Assert.java:52)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testChooseTargetWithHalfStaleNodes(TestReplicationPolicy.java:613)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testChooseTarget1[1]

Error Message:
null

Stack Trace:
java.lang.AssertionError: null
        at org.junit.Assert.fail(Assert.java:86)
        at org.junit.Assert.assertTrue(Assert.java:41)
        at org.junit.Assert.assertTrue(Assert.java:52)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testChooseTarget1(TestReplicationPolicy.java:213)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testChooseTarget3[1]

Error Message:
null

Stack Trace:
java.lang.AssertionError: null
        at org.junit.Assert.fail(Assert.java:86)
        at org.junit.Assert.assertTrue(Assert.java:41)
        at org.junit.Assert.assertTrue(Assert.java:52)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testChooseTarget3(TestReplicationPolicy.java:330)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testRereplicate3[1]

Error Message:
null

Stack Trace:
java.lang.AssertionError: null
        at org.junit.Assert.fail(Assert.java:86)
        at org.junit.Assert.assertTrue(Assert.java:41)
        at org.junit.Assert.assertTrue(Assert.java:52)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.testRereplicate3(TestReplicationPolicy.java:800)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyWithNodeGroup.testRereplicate1

Error Message:
null

Stack Trace:
java.lang.AssertionError: null
        at org.junit.Assert.fail(Assert.java:86)
        at org.junit.Assert.assertTrue(Assert.java:41)
        at org.junit.Assert.assertTrue(Assert.java:52)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyWithNodeGroup.testRereplicate1(TestReplicationPolicyWithNodeGroup.java:441)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyWithNodeGroup.testRereplicate3

Error Message:
null

Stack Trace:
java.lang.AssertionError: null
        at org.junit.Assert.fail(Assert.java:86)
        at org.junit.Assert.assertTrue(Assert.java:41)
        at org.junit.Assert.assertTrue(Assert.java:52)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyWithNodeGroup.testRereplicate3(TestReplicationPolicyWithNodeGroup.java:504)


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestUnderReplicatedBlocks.testNumberOfBlocksToBeReplicated

Error Message:
Timed out waiting for /testFile to reach 2 replicas

Stack Trace:
java.util.concurrent.TimeoutException: Timed out waiting for /testFile to reach 2 replicas
        at org.apache.hadoop.hdfs.DFSTestUtil.waitReplication(DFSTestUtil.java:768)
        at org.apache.hadoop.hdfs.server.blockmanagement.TestUnderReplicatedBlocks.testNumberOfBlocksToBeReplicated(TestUnderReplicatedBlocks.java:122)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestDataNodeVolumeFailure.testUnderReplicationAfterVolFailure

Error Message:
File /test2 could only be replicated to 0 nodes instead of minReplication (=1).  There are 3 datanode(s) running and 2 node(s) are excluded in this operation.
 at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:1731)
 at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:299)
 at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:2457)
 at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:796)
 at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:500)
 at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2305)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2301)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2299)


Stack Trace:
org.apache.hadoop.ipc.RemoteException: File /test2 could only be replicated to 0 nodes instead of minReplication (=1).  There are 3 datanode(s) running and 2 node(s) are excluded in this operation.
        at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:1731)
        at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:299)
        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:2457)
        at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:796)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:500)
        at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
        at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2305)
        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2301)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:415)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
        at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2299)

        at org.apache.hadoop.ipc.Client.call(Client.java:1448)
        at org.apache.hadoop.ipc.Client.call(Client.java:1385)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
        at com.sun.proxy.$Proxy21.addBlock(Unknown Source)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:404)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
        at com.sun.proxy.$Proxy22.addBlock(Unknown Source)
        at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:911)
        at org.apache.hadoop.hdfs.DataStreamer.locateFollowingBlock(DataStreamer.java:1684)
        at org.apache.hadoop.hdfs.DataStreamer.nextBlockOutputStream(DataStreamer.java:1494)
        at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:594)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestDataNodeVolumeFailureReporting.testMultipleVolFailuresOnNode

Error Message:
File /test1 could only be replicated to 0 nodes instead of minReplication (=1).  There are 3 datanode(s) running and 2 node(s) are excluded in this operation.
 at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:1731)
 at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:299)
 at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:2457)
 at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:796)
 at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:500)
 at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2305)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2301)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2299)


Stack Trace:
org.apache.hadoop.ipc.RemoteException: File /test1 could only be replicated to 0 nodes instead of minReplication (=1).  There are 3 datanode(s) running and 2 node(s) are excluded in this operation.
        at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:1731)
        at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:299)
        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:2457)
        at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:796)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:500)
        at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
        at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2305)
        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2301)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:415)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
        at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2299)

        at org.apache.hadoop.ipc.Client.call(Client.java:1448)
        at org.apache.hadoop.ipc.Client.call(Client.java:1385)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
        at com.sun.proxy.$Proxy21.addBlock(Unknown Source)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:404)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
        at com.sun.proxy.$Proxy22.addBlock(Unknown Source)
        at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:911)
        at org.apache.hadoop.hdfs.DataStreamer.locateFollowingBlock(DataStreamer.java:1684)
        at org.apache.hadoop.hdfs.DataStreamer.nextBlockOutputStream(DataStreamer.java:1494)
        at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:594)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestDataNodeVolumeFailureReporting.testVolFailureStatsPreservedOnNNRestart

Error Message:
File /test1 could only be replicated to 0 nodes instead of minReplication (=1).  There are 3 datanode(s) running and 2 node(s) are excluded in this operation.
 at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:1731)
 at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:299)
 at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:2457)
 at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:796)
 at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:500)
 at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2305)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2301)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2299)


Stack Trace:
org.apache.hadoop.ipc.RemoteException: File /test1 could only be replicated to 0 nodes instead of minReplication (=1).  There are 3 datanode(s) running and 2 node(s) are excluded in this operation.
        at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:1731)
        at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:299)
        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:2457)
        at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:796)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:500)
        at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
        at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2305)
        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2301)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:415)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
        at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2299)

        at org.apache.hadoop.ipc.Client.call(Client.java:1448)
        at org.apache.hadoop.ipc.Client.call(Client.java:1385)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
        at com.sun.proxy.$Proxy21.addBlock(Unknown Source)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:404)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
        at com.sun.proxy.$Proxy22.addBlock(Unknown Source)
        at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:911)
        at org.apache.hadoop.hdfs.DataStreamer.locateFollowingBlock(DataStreamer.java:1684)
        at org.apache.hadoop.hdfs.DataStreamer.nextBlockOutputStream(DataStreamer.java:1494)
        at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:594)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestNNHandlesCombinedBlockReport.blockReport_08

Error Message:
Wrong number of PendingReplication blocks expected:<2> but was:<1>

Stack Trace:
java.lang.AssertionError: Wrong number of PendingReplication blocks expected:<2> but was:<1>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.apache.hadoop.hdfs.server.datanode.BlockReportTestBase.blockReport_08(BlockReportTestBase.java:513)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestNNHandlesCombinedBlockReport.blockReport_09

Error Message:
Wrong number of PendingReplication blocks expected:<2> but was:<1>

Stack Trace:
java.lang.AssertionError: Wrong number of PendingReplication blocks expected:<2> but was:<1>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.apache.hadoop.hdfs.server.datanode.BlockReportTestBase.blockReport_09(BlockReportTestBase.java:556)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestAddOverReplicatedStripedBlocks.testProcessOverReplicatedSBSmallerThanFullBlocks

Error Message:
expected:<8> but was:<7>

Stack Trace:
java.lang.AssertionError: expected:<8> but was:<7>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:743)
        at org.junit.Assert.assertEquals(Assert.java:118)
        at org.junit.Assert.assertEquals(Assert.java:555)
        at org.junit.Assert.assertEquals(Assert.java:542)
        at org.apache.hadoop.hdfs.StripedFileTestUtil.verifyLocatedStripedBlocks(StripedFileTestUtil.java:346)
        at org.apache.hadoop.hdfs.server.namenode.TestAddOverReplicatedStripedBlocks.testProcessOverReplicatedSBSmallerThanFullBlocks(TestAddOverReplicatedStripedBlocks.java:164)


FAILED:  org.apache.hadoop.hdfs.TestSafeModeWithStripedFile.testStripedFile0

Error Message:
Failed to get 6 nodes from namenode: blockGroupSize= 9, blocks.length= 5

Stack Trace:
java.io.IOException: Failed to get 6 nodes from namenode: blockGroupSize= 9, blocks.length= 5
        at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:441)
        at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:482)
        at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:217)
        at org.apache.hadoop.fs.FSOutputSummer.write1(FSOutputSummer.java:125)
        at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:111)
        at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:57)
        at java.io.DataOutputStream.write(DataOutputStream.java:107)
        at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:89)
        at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:61)
        at org.apache.hadoop.hdfs.DFSTestUtil.writeFile(DFSTestUtil.java:828)
        at org.apache.hadoop.hdfs.TestSafeModeWithStripedFile.doTest(TestSafeModeWithStripedFile.java:94)
        at org.apache.hadoop.hdfs.TestSafeModeWithStripedFile.testStripedFile0(TestSafeModeWithStripedFile.java:72)


FAILED:  org.apache.hadoop.hdfs.TestSafeModeWithStripedFile.testStripedFile1

Error Message:
Failed to get 6 nodes from namenode: blockGroupSize= 9, blocks.length= 5

Stack Trace:
java.io.IOException: Failed to get 6 nodes from namenode: blockGroupSize= 9, blocks.length= 5
        at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:441)
        at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:482)
        at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:217)
        at org.apache.hadoop.fs.FSOutputSummer.write1(FSOutputSummer.java:125)
        at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:111)
        at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:57)
        at java.io.DataOutputStream.write(DataOutputStream.java:107)
        at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:89)
        at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:61)
        at org.apache.hadoop.hdfs.DFSTestUtil.writeFile(DFSTestUtil.java:828)
        at org.apache.hadoop.hdfs.TestSafeModeWithStripedFile.doTest(TestSafeModeWithStripedFile.java:94)
        at org.apache.hadoop.hdfs.TestSafeModeWithStripedFile.testStripedFile1(TestSafeModeWithStripedFile.java:77)

