This is an automated email from the ASF dual-hosted git repository.

dcapwell pushed a commit to branch cassandra-3.0
in repository https://gitbox.apache.org/repos/asf/cassandra.git
commit 5bb76ba97aa4decc8a0af676873e810ea036968e
Merge: 7b3a15d f15c6b8
Author: David Capwell <dcapw...@apache.org>
AuthorDate: Mon Oct 19 12:29:33 2020 -0700

    Merge branch 'cassandra-2.2' into cassandra-3.0

 .circleci/config.yml | 72 ++++++++---------
 src/java/org/apache/cassandra/tools/NodeProbe.java | 15 ++++
 src/java/org/apache/cassandra/tools/NodeTool.java | 52 ++++++++-----
 .../tools/{nodetool/Version.java => Output.java} | 22 +++---
 .../cassandra/tools/nodetool/BootstrapResume.java | 4 +-
 .../apache/cassandra/tools/nodetool/Cleanup.java | 4 +-
 .../cassandra/tools/nodetool/ClearSnapshot.java | 4 +-
 .../tools/nodetool/CompactionHistory.java | 12 +--
 .../cassandra/tools/nodetool/CompactionStats.java | 8 +-
 .../cassandra/tools/nodetool/DescribeCluster.java | 16 ++--
 .../cassandra/tools/nodetool/DescribeRing.java | 8 +-
 .../tools/nodetool/FailureDetectorInfo.java | 4 +-
 .../apache/cassandra/tools/nodetool/GcStats.java | 6 +-
 .../tools/nodetool/GetCompactionThreshold.java | 8 +-
 .../tools/nodetool/GetCompactionThroughput.java | 4 +-
 .../cassandra/tools/nodetool/GetEndpoints.java | 4 +-
 .../tools/nodetool/GetInterDCStreamThroughput.java | 2 +-
 .../cassandra/tools/nodetool/GetLoggingLevels.java | 6 +-
 .../cassandra/tools/nodetool/GetSSTables.java | 4 +-
 .../tools/nodetool/GetStreamThroughput.java | 4 +-
 .../tools/nodetool/GetTraceProbability.java | 2 +-
 .../cassandra/tools/nodetool/GossipInfo.java | 4 +-
 .../org/apache/cassandra/tools/nodetool/Info.java | 41 +++++-----
 .../cassandra/tools/nodetool/ListSnapshots.java | 12 +--
 .../apache/cassandra/tools/nodetool/NetStats.java | 44 ++++++-----
 .../cassandra/tools/nodetool/ProxyHistograms.java | 15 ++--
 .../cassandra/tools/nodetool/RangeKeySample.java | 6 +-
 .../apache/cassandra/tools/nodetool/Refresh.java | 2 +-
 .../cassandra/tools/nodetool/RemoveNode.java | 6 +-
 .../apache/cassandra/tools/nodetool/Repair.java | 2 +-
 .../org/apache/cassandra/tools/nodetool/Ring.java | 29 +++----
 .../org/apache/cassandra/tools/nodetool/Scrub.java | 5 +-
 .../apache/cassandra/tools/nodetool/Snapshot.java | 12 +--
 .../apache/cassandra/tools/nodetool/Status.java | 34 ++++----
 .../cassandra/tools/nodetool/StatusBackup.java | 4 +-
 .../cassandra/tools/nodetool/StatusBinary.java | 4 +-
 .../cassandra/tools/nodetool/StatusGossip.java | 4 +-
 .../cassandra/tools/nodetool/StatusHandoff.java | 9 ++-
 .../cassandra/tools/nodetool/StatusThrift.java | 4 +-
 .../cassandra/tools/nodetool/TableHistograms.java | 20 +++--
 .../cassandra/tools/nodetool/TableStats.java | 90 +++++++++++-----------
 .../cassandra/tools/nodetool/TopPartitions.java | 18 +++--
 .../apache/cassandra/tools/nodetool/TpStats.java | 12 +--
 .../cassandra/tools/nodetool/UpgradeSSTable.java | 4 +-
 .../apache/cassandra/tools/nodetool/Verify.java | 4 +-
 .../apache/cassandra/tools/nodetool/Version.java | 4 +-
 .../cassandra/distributed/impl/Instance.java | 62 +++++++++++--
 .../test/ClientNetworkStopStartTest.java | 28 ++-----
 .../cassandra/distributed/test/NodeToolTest.java | 16 +++-
 49 files changed, 434 insertions(+), 322 deletions(-)

diff --cc .circleci/config.yml
index 1fb2fcb,d5efe4f..2e40513
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@@ -1,97 -1,5 +1,97 @@@
  version: 2
  jobs:
+   j8_jvm_upgrade_dtests:
+     docker:
+     - image: spod/cassandra-testing-ubuntu1810-java11-w-dependencies:20190306
-     resource_class: medium
++    resource_class: xlarge
+     working_directory: ~/
+     shell: /bin/bash -eo pipefail -l
+     parallelism: 1
+     steps:
+     - attach_workspace:
+         at: /home/cassandra
+     - run:
+         name: Determine distributed Tests to Run
+         command: |
+           # reminder: this code (along with all the steps) is independently executed on every circle container
+           # so the goal here is to get the circleci script to return the tests *this* container will run
+           # which we do via the `circleci` cli tool.
+
+           rm -fr ~/cassandra-dtest/upgrade_tests
+           echo "***java tests***"
+
+           # get all of our unit test filenames
+           set -eo pipefail && circleci tests glob "$HOME/cassandra/test/distributed/**/*.java" > /tmp/all_java_unit_tests.txt
+
+           # split up the unit tests into groups based on the number of containers we have
+           set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+           set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/distributed/;;g" | grep "Test\.java$" | grep upgrade > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+           echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+           cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+         no_output_timeout: 15m
+     - run:
+         name: Log Environment Information
+         command: |
+           echo '*** id ***'
+           id
+           echo '*** cat /proc/cpuinfo ***'
+           cat /proc/cpuinfo
+           echo '*** free -m ***'
+           free -m
+           echo '*** df -m ***'
+           df -m
+           echo '*** ifconfig -a ***'
+           ifconfig -a
+           echo '*** uname -a ***'
+           uname -a
+           echo '*** mount ***'
+           mount
+           echo '*** env ***'
+           env
+           echo '*** java ***'
+           which java
+           java -version
+     - run:
+         name: Run Unit Tests (testclasslist)
+         command: |
+           set -x
+           export PATH=$JAVA_HOME/bin:$PATH
+           time mv ~/cassandra /tmp
+           cd /tmp/cassandra
+           if [ -d ~/dtest_jars ]; then
+             cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+           fi
+           test_timeout=$(grep 'name="test.distributed.timeout"' build.xml | awk -F'"' '{print $4}' || true)
+           if [ -z "$test_timeout" ]; then
+             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
+           fi
+           ant testclasslist -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=distributed
+         no_output_timeout: 15m
+     - store_test_results:
+         path: /tmp/cassandra/build/test/output/
+     - store_artifacts:
+         path: /tmp/cassandra/build/test/output
+         destination: junitxml
+     - store_artifacts:
+         path: /tmp/cassandra/build/test/logs
+         destination: logs
+     environment:
+     - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+     - ANT_HOME: /usr/share/ant
+     - LANG: en_US.UTF-8
+     - KEEP_TEST_DIR: true
+     - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+     - PYTHONIOENCODING: utf-8
+     - PYTHONUNBUFFERED: true
+     - CASS_DRIVER_NO_EXTENSIONS: true
+     - CASS_DRIVER_NO_CYTHON: true
+     - CASSANDRA_SKIP_SYNC: true
+     - DTEST_REPO: git://github.com/apache/cassandra-dtest.git
+     - DTEST_BRANCH: master
-     - CCM_MAX_HEAP_SIZE: 1024M
-     - CCM_HEAP_NEWSIZE: 256M
++    - CCM_MAX_HEAP_SIZE: 2048M
++    - CCM_HEAP_NEWSIZE: 512M
+     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
  build:
    docker:
    - image: spod/cassandra-testing-ubuntu1810-java11-w-dependencies:20190306
@@@ -169,174 -71,17 +169,174 @@@
      - CASSANDRA_SKIP_SYNC: true
      - DTEST_REPO: git://github.com/apache/cassandra-dtest.git
      - DTEST_BRANCH: master
--    - CCM_MAX_HEAP_SIZE: 1024M
--    - CCM_HEAP_NEWSIZE: 256M
++    - CCM_MAX_HEAP_SIZE: 2048M
++    - CCM_HEAP_NEWSIZE: 512M
+     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+   j8_dtests-no-vnodes:
+     docker:
+     - image: spod/cassandra-testing-ubuntu1810-java11-w-dependencies:20190306
-     resource_class: medium
++    resource_class: xlarge
+     working_directory: ~/
+     shell: /bin/bash -eo pipefail -l
-     parallelism: 4
++    parallelism: 100
+     steps:
+     - attach_workspace:
+         at: /home/cassandra
+     - run:
+         name: Clone Cassandra dtest Repository (via git)
+         command: |
+           git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
+     - run:
+         name: Configure virtualenv and python Dependencies
+         command: |
+           # note, this should be super quick as all dependencies should be pre-installed in the docker image
+           # if additional dependencies were added to requirmeents.txt and the docker image hasn't been updated
+           # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
+           # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
+           source ~/env/bin/activate
+           export PATH=$JAVA_HOME/bin:$PATH
-           pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
++          pip3 install --upgrade -r ~/cassandra-dtest/requirements.txt
+           pip3 freeze
+     - run:
+         name: Determine Tests to Run (j8_without_vnodes)
+         no_output_timeout: 5m
+         command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n export \nfi\n\necho \"***Collected DTests (j8_without_vnodes)***\"\nset -eo pipefail && ./run_dtests.py --skip-resource-inte [...]
+     - run:
+         name: Run dtests (j8_without_vnodes)
+         no_output_timeout: 15m
+         command: "echo \"cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_without_vnodes_final.txt\n\nsource ~/env/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not th [...]
+     - store_test_results:
+         path: /tmp/results
+     - store_artifacts:
+         path: /tmp/dtest
+         destination: dtest_j8_without_vnodes
+     - store_artifacts:
+         path: ~/cassandra-dtest/logs
+         destination: dtest_j8_without_vnodes_logs
+     environment:
+     - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+     - ANT_HOME: /usr/share/ant
+     - LANG: en_US.UTF-8
+     - KEEP_TEST_DIR: true
+     - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+     - PYTHONIOENCODING: utf-8
+     - PYTHONUNBUFFERED: true
+     - CASS_DRIVER_NO_EXTENSIONS: true
+     - CASS_DRIVER_NO_CYTHON: true
+     - CASSANDRA_SKIP_SYNC: true
+     - DTEST_REPO: git://github.com/apache/cassandra-dtest.git
+     - DTEST_BRANCH: master
-     - CCM_MAX_HEAP_SIZE: 1024M
-     - CCM_HEAP_NEWSIZE: 256M
++    - CCM_MAX_HEAP_SIZE: 2048M
++    - CCM_HEAP_NEWSIZE: 512M
+     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+   j8_upgradetests-no-vnodes:
+     docker:
+     - image: spod/cassandra-testing-ubuntu1810-java11-w-dependencies:20190306
-     resource_class: medium
++    resource_class: xlarge
+     working_directory: ~/
+     shell: /bin/bash -eo pipefail -l
-     parallelism: 4
++    parallelism: 100
+     steps:
+     - attach_workspace:
+         at: /home/cassandra
+     - run:
+         name: Clone Cassandra dtest Repository (via git)
+         command: |
+           git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
+     - run:
+         name: Configure virtualenv and python Dependencies
+         command: |
+           # note, this should be super quick as all dependencies should be pre-installed in the docker image
+           # if additional dependencies were added to requirmeents.txt and the docker image hasn't been updated
+           # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
+           # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
+           source ~/env/bin/activate
+           export PATH=$JAVA_HOME/bin:$PATH
-           pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
++          pip3 install --upgrade -r ~/cassandra-dtest/requirements.txt
+           pip3 freeze
+     - run:
+         name: Determine Tests to Run (j8_upgradetests_without_vnodes)
+         no_output_timeout: 5m
+         command: |
+           # reminder: this code (along with all the steps) is independently executed on every circle container
+           # so the goal here is to get the circleci script to return the tests *this* container will run
+           # which we do via the `circleci` cli tool.
+
+           cd cassandra-dtest
+           source ~/env/bin/activate
+           export PATH=$JAVA_HOME/bin:$PATH
+
+           if [ -n 'RUN_STATIC_UPGRADE_MATRIX=true' ]; then
+             export RUN_STATIC_UPGRADE_MATRIX=true
+           fi
+
+           echo "***Collected DTests (j8_upgradetests_without_vnodes)***"
+           set -eo pipefail && ./run_dtests.py --execute-upgrade-tests --dtest-print-tests-only --dtest-print-tests-output=/tmp/all_dtest_tests_j8_upgradetests_without_vnodes_raw --cassandra-dir=../cassandra
+           if [ -z '^upgrade_tests' ]; then
+             mv /tmp/all_dtest_tests_j8_upgradetests_without_vnodes_raw /tmp/all_dtest_tests_j8_upgradetests_without_vnodes
+           else
+             grep -e '^upgrade_tests' /tmp/all_dtest_tests_j8_upgradetests_without_vnodes_raw > /tmp/all_dtest_tests_j8_upgradetests_without_vnodes || { echo "Filter did not match any tests! Exiting build."; exit 0; }
+           fi
+           set -eo pipefail && circleci tests split --split-by=timings --timings-type=classname /tmp/all_dtest_tests_j8_upgradetests_without_vnodes > /tmp/split_dtest_tests_j8_upgradetests_without_vnodes.txt
+           cat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes.txt | tr '\n' ' ' > /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt
+           cat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt
+     - run:
+         name: Run dtests (j8_upgradetests_without_vnodes)
+         no_output_timeout: 15m
+         command: |
+           echo "cat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt"
+           cat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt
+
+           source ~/env/bin/activate
+           export PATH=$JAVA_HOME/bin:$PATH
+           if [ -n 'RUN_STATIC_UPGRADE_MATRIX=true' ]; then
+             export RUN_STATIC_UPGRADE_MATRIX=true
+           fi
+
+           java -version
+           cd ~/cassandra-dtest
+           mkdir -p /tmp/dtest
+
+           echo "env: $(env)"
+           echo "** done env"
+           mkdir -p /tmp/results/dtests
+           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
+           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt`
+           set -o pipefail && cd ~/cassandra-dtest && pytest --execute-upgrade-tests --log-level="INFO" --junit-xml=/tmp/results/dtests/pytest_result_j8_upgradetests_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+     - store_test_results:
+         path: /tmp/results
+     - store_artifacts:
+         path: /tmp/dtest
+         destination: dtest_j8_upgradetests_without_vnodes
+     - store_artifacts:
+         path: ~/cassandra-dtest/logs
+         destination: dtest_j8_upgradetests_without_vnodes_logs
+     environment:
+     - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+     - ANT_HOME: /usr/share/ant
+     - LANG: en_US.UTF-8
+     - KEEP_TEST_DIR: true
+     - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+     - PYTHONIOENCODING: utf-8
+     - PYTHONUNBUFFERED: true
+     - CASS_DRIVER_NO_EXTENSIONS: true
+     - CASS_DRIVER_NO_CYTHON: true
+     - CASSANDRA_SKIP_SYNC: true
+     - DTEST_REPO: git://github.com/apache/cassandra-dtest.git
+     - DTEST_BRANCH: master
-     - CCM_MAX_HEAP_SIZE: 1024M
-     - CCM_HEAP_NEWSIZE: 256M
++    - CCM_MAX_HEAP_SIZE: 2048M
++    - CCM_HEAP_NEWSIZE: 512M
      - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
      - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
    j8_unit_tests:
      docker:
      - image: spod/cassandra-testing-ubuntu1810-java11-w-dependencies:20190306
--    resource_class: medium
++    resource_class: xlarge
      working_directory: ~/
      shell: /bin/bash -eo pipefail -l
--    parallelism: 4
++    parallelism: 100
      steps:
      - attach_workspace:
          at: /home/cassandra
@@@ -418,72 -160,14 +418,72 @@@
      - CASSANDRA_SKIP_SYNC: true
      - DTEST_REPO: git://github.com/apache/cassandra-dtest.git
      - DTEST_BRANCH: master
--    - CCM_MAX_HEAP_SIZE: 1024M
--    - CCM_HEAP_NEWSIZE: 256M
++    - CCM_MAX_HEAP_SIZE: 2048M
++    - CCM_HEAP_NEWSIZE: 512M
+     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+   j8_dtests-with-vnodes:
+     docker:
+     - image: spod/cassandra-testing-ubuntu1810-java11-w-dependencies:20190306
-     resource_class: medium
++    resource_class: xlarge
+     working_directory: ~/
+     shell: /bin/bash -eo pipefail -l
-     parallelism: 4
++    parallelism: 100
+     steps:
+     - attach_workspace:
+         at: /home/cassandra
+     - run:
+         name: Clone Cassandra dtest Repository (via git)
+         command: |
+           git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
+     - run:
+         name: Configure virtualenv and python Dependencies
+         command: |
+           # note, this should be super quick as all dependencies should be pre-installed in the docker image
+           # if additional dependencies were added to requirmeents.txt and the docker image hasn't been updated
+           # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
+           # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
+           source ~/env/bin/activate
+           export PATH=$JAVA_HOME/bin:$PATH
-           pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
++          pip3 install --upgrade -r ~/cassandra-dtest/requirements.txt
+           pip3 freeze
+     - run:
+         name: Determine Tests to Run (j8_with_vnodes)
+         no_output_timeout: 5m
+         command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n export \nfi\n\necho \"***Collected DTests (j8_with_vnodes)***\"\nset -eo pipefail && ./run_dtests.py --use-vnodes --skip-res [...]
+     - run:
+         name: Run dtests (j8_with_vnodes)
+         no_output_timeout: 15m
+         command: "echo \"cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_with_vnodes_final.txt\n\nsource ~/env/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit [...]
+     - store_test_results:
+         path: /tmp/results
+     - store_artifacts:
+         path: /tmp/dtest
+         destination: dtest_j8_with_vnodes
+     - store_artifacts:
+         path: ~/cassandra-dtest/logs
+         destination: dtest_j8_with_vnodes_logs
+     environment:
+     - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+     - ANT_HOME: /usr/share/ant
+     - LANG: en_US.UTF-8
+     - KEEP_TEST_DIR: true
+     - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+     - PYTHONIOENCODING: utf-8
+     - PYTHONUNBUFFERED: true
+     - CASS_DRIVER_NO_EXTENSIONS: true
+     - CASS_DRIVER_NO_CYTHON: true
+     - CASSANDRA_SKIP_SYNC: true
+     - DTEST_REPO: git://github.com/apache/cassandra-dtest.git
+     - DTEST_BRANCH: master
-     - CCM_MAX_HEAP_SIZE: 1024M
-     - CCM_HEAP_NEWSIZE: 256M
++    - CCM_MAX_HEAP_SIZE: 2048M
++    - CCM_HEAP_NEWSIZE: 512M
      - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
      - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
    j8_jvm_dtests:
      docker:
      - image: spod/cassandra-testing-ubuntu1810-java11-w-dependencies:20190306
--    resource_class: medium
++    resource_class: xlarge
      working_directory: ~/
      shell: /bin/bash -eo pipefail -l
      parallelism: 1
@@@ -568,14 -249,14 +568,14 @@@
      - CASSANDRA_SKIP_SYNC: true
      - DTEST_REPO: git://github.com/apache/cassandra-dtest.git
      - DTEST_BRANCH: master
--    - CCM_MAX_HEAP_SIZE: 1024M
--    - CCM_HEAP_NEWSIZE: 256M
++    - CCM_MAX_HEAP_SIZE: 2048M
++    - CCM_HEAP_NEWSIZE: 512M
      - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
      - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
    utests_long:
      docker:
      - image: spod/cassandra-testing-ubuntu1810-java11-w-dependencies:20190306
--    resource_class: medium
++    resource_class: xlarge
      working_directory: ~/
      shell: /bin/bash -eo pipefail -l
      parallelism: 1
@@@ -614,17 -292,17 +614,17 @@@
      - CASSANDRA_SKIP_SYNC: true
      - DTEST_REPO: git://github.com/apache/cassandra-dtest.git
      - DTEST_BRANCH: master
--    - CCM_MAX_HEAP_SIZE: 1024M
--    - CCM_HEAP_NEWSIZE: 256M
++    - CCM_MAX_HEAP_SIZE: 2048M
++    - CCM_HEAP_NEWSIZE: 512M
      - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
      - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
    utests_compression:
      docker:
      - image: spod/cassandra-testing-ubuntu1810-java11-w-dependencies:20190306
--    resource_class: medium
++    resource_class: xlarge
      working_directory: ~/
      shell: /bin/bash -eo pipefail -l
--    parallelism: 4
++    parallelism: 100
      steps:
      - attach_workspace:
          at: /home/cassandra
@@@ -706,11 -381,11 +706,11 @@@
      - CASSANDRA_SKIP_SYNC: true
      - DTEST_REPO: git://github.com/apache/cassandra-dtest.git
      - DTEST_BRANCH: master
--    - CCM_MAX_HEAP_SIZE: 1024M
--    - CCM_HEAP_NEWSIZE: 256M
++    - CCM_MAX_HEAP_SIZE: 2048M
++    - CCM_HEAP_NEWSIZE: 512M
      - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
      - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-   j8_dtests-with-vnodes:
+   j8_dtest_jars_build:
      docker:
      - image: spod/cassandra-testing-ubuntu1810-java11-w-dependencies:20190306
      resource_class: medium
@@@ -781,8 -596,8 +781,8 @@@
      - CASSANDRA_SKIP_SYNC: true
      - DTEST_REPO: git://github.com/apache/cassandra-dtest.git
      - DTEST_BRANCH: master
--    - CCM_MAX_HEAP_SIZE: 1024M
--    - CCM_HEAP_NEWSIZE: 256M
++    - CCM_MAX_HEAP_SIZE: 2048M
++    - CCM_HEAP_NEWSIZE: 512M
      - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
      - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
  workflows:

diff --cc src/java/org/apache/cassandra/tools/NodeProbe.java
index 2425821,2ebef5d..caba806
--- a/src/java/org/apache/cassandra/tools/NodeProbe.java
+++ b/src/java/org/apache/cassandra/tools/NodeProbe.java
@@@ -119,7 -115,7 +119,8 @@@ public class NodeProbe implements AutoC
      protected CacheServiceMBean cacheService;
      protected StorageProxyMBean spProxy;
      protected HintedHandOffManagerMBean hhProxy;
+     protected BatchlogManagerMBean bmProxy;
+     protected Output output;
      private boolean failed;

      /**
@@@ -242,35 -240,37 +247,45 @@@
      public void close() throws IOException
      {
-         jmxc.close();
+         try
+         {
+             jmxc.close();
+         }
+         catch (ConnectException e)
+         {
+             // result of 'stopdaemon' command - i.e. if close() call fails, the daemon is shutdown
+             System.out.println("Cassandra has shutdown.");
+         }
      }

+     public void setOutput(Output output)
+     {
+         this.output = output;
+     }
+
+     public Output output()
+     {
+         return output;
+     }
+
-     public int forceKeyspaceCleanup(int jobs, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException
+     public int forceKeyspaceCleanup(int jobs, String keyspaceName, String... tables) throws IOException, ExecutionException, InterruptedException
      {
-         return ssProxy.forceKeyspaceCleanup(jobs, keyspaceName, columnFamilies);
+         return ssProxy.forceKeyspaceCleanup(jobs, keyspaceName, tables);
      }

-     public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, boolean reinsertOverflowedTTLRows, int jobs, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException
+     public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, boolean reinsertOverflowedTTL, int jobs, String keyspaceName, String... tables) throws IOException, ExecutionException, InterruptedException
      {
-         return ssProxy.scrub(disableSnapshot, skipCorrupted, checkData, reinsertOverflowedTTLRows, jobs, keyspaceName, columnFamilies);
+         return ssProxy.scrub(disableSnapshot, skipCorrupted, checkData, reinsertOverflowedTTL, jobs, keyspaceName, tables);
      }

-     public int verify(boolean extendedVerify, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException
+     public int verify(boolean extendedVerify, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException
      {
-         return ssProxy.verify(extendedVerify, keyspaceName, columnFamilies);
+         return ssProxy.verify(extendedVerify, keyspaceName, tableNames);
      }

-     public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, int jobs, String... columnFamilies) throws IOException, ExecutionException, InterruptedException
+     public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, int jobs, String... tableNames) throws IOException, ExecutionException, InterruptedException
      {
-         return ssProxy.upgradeSSTables(keyspaceName, excludeCurrentVersion, jobs, columnFamilies);
+         return ssProxy.upgradeSSTables(keyspaceName, excludeCurrentVersion, jobs, tableNames);
      }

      private void checkJobs(PrintStream out, int jobs)

diff --cc src/java/org/apache/cassandra/tools/nodetool/Cleanup.java
index 47c65c8,bc7bf32..e60d87e
--- a/src/java/org/apache/cassandra/tools/nodetool/Cleanup.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Cleanup.java
@@@ -52,7 -52,7 +52,7 @@@ public class Cleanup extends NodeToolCm
          try
          {
-             probe.forceKeyspaceCleanup(System.out, jobs, keyspace, tableNames);
-             probe.forceKeyspaceCleanup(probe.output().out, jobs, keyspace, cfnames);
++            probe.forceKeyspaceCleanup(probe.output().out, jobs, keyspace, tableNames);
          }
          catch (Exception e)
          {

diff --cc src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java
index 344d9dc,029ec61..18d8053
--- a/src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java
@@@ -42,15 -44,15 +44,15 @@@ public class ListSnapshots extends Node
          final Map<String,TabularData> snapshotDetails = probe.getSnapshotDetails();
          if (snapshotDetails.isEmpty())
          {
-             System.out.println("There are no snapshots");
-             out.printf("There are no snapshots");
++            out.println("There are no snapshots");
              return;
          }

          final long trueSnapshotsSize = probe.trueSnapshotsSize();
-         final String format = "%-20s%-29s%-29s%-19s%-19s%n";
+         final String format = "%-40s %-29s %-29s %-19s %-19s%n";
          // display column names only once
          final List<String> indexNames = snapshotDetails.entrySet().iterator().next().getValue().getTabularType().getIndexNames();
-         System.out.printf(format, (Object[]) indexNames.toArray(new String[indexNames.size()]));
+         out.printf(format, (Object[]) indexNames.toArray(new String[indexNames.size()]));

          for (final Map.Entry<String, TabularData> snapshotDetail : snapshotDetails.entrySet())
          {

diff --cc src/java/org/apache/cassandra/tools/nodetool/Scrub.java
index 263291d,0e17d33..2b73756
--- a/src/java/org/apache/cassandra/tools/nodetool/Scrub.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Scrub.java
@@@ -69,13 -69,11 +69,12 @@@ public class Scrub extends NodeToolCm
      {
          try
          {
-             probe.scrub(System.out, disableSnapshot, skipCorrupted, !noValidation, reinsertOverflowedTTL, jobs, keyspace, tableNames);
-         }
-         catch (IllegalArgumentException e)
-             probe.scrub(probe.output().out, disableSnapshot, skipCorrupted, !noValidation, reinsertOverflowedTTL, jobs, keyspace, cfnames);
++            probe.scrub(probe.output().out, disableSnapshot, skipCorrupted, !noValidation, reinsertOverflowedTTL, jobs, keyspace, tableNames);
+         } catch (IllegalArgumentException e)
          {
              throw e;
-         } catch (Exception e)
+         }
+         catch (Exception e)
          {
              throw new RuntimeException("Error occurred during scrubbing", e);
          }

diff --cc src/java/org/apache/cassandra/tools/nodetool/Snapshot.java
index 4f549e5,275b947..c2dd097
--- a/src/java/org/apache/cassandra/tools/nodetool/Snapshot.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Snapshot.java
@@@ -67,9 -69,9 +69,9 @@@ public class Snapshot extends NodeToolC
              }
              if (!snapshotName.isEmpty())
                  sb.append(" with snapshot name [").append(snapshotName).append("]");
-             System.out.println(sb.toString());
+             out.println(sb.toString());
-             probe.takeMultipleColumnFamilySnapshot(snapshotName, ktList.split(","));
+             probe.takeMultipleTableSnapshot(snapshotName, ktList.split(","));
-             System.out.println("Snapshot directory: " + snapshotName);
+             out.println("Snapshot directory: " + snapshotName);
          }
          else
          {

diff --cc src/java/org/apache/cassandra/tools/nodetool/Status.java
index 091040b,2bfc991..299608e
--- a/src/java/org/apache/cassandra/tools/nodetool/Status.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Status.java
@@@ -98,15 -99,15 +100,15 @@@ public class Status extends NodeToolCm
          for (Map.Entry<String, SetHostStat> dc : dcs.entrySet())
          {
              String dcHeader = String.format("Datacenter: %s%n", dc.getKey());
-             System.out.print(dcHeader);
-             for (int i = 0; i < (dcHeader.length() - 1); i++) System.out.print('=');
-             System.out.println();
-             out.printf(dcHeader);
++            out.print(dcHeader);
+             for (int i = 0; i < (dcHeader.length() - 1); i++) out.print('=');
+             out.println();

              // Legend
-             System.out.println("Status=Up/Down");
-             System.out.println("|/ State=Normal/Leaving/Joining/Moving");
+             out.println("Status=Up/Down");
+             out.println("|/ State=Normal/Leaving/Joining/Moving");

-             printNodesHeader(hasEffectiveOwns, isTokenPerNode, maxAddressLength);
-             printNodesHeader(hasEffectiveOwns, isTokenPerNode, out);
++            printNodesHeader(hasEffectiveOwns, isTokenPerNode, maxAddressLength, out);

              ArrayListMultimap<InetAddress, HostStat> hostToTokens = ArrayListMultimap.create();
              for (HostStat stat : dc.getValue())
@@@ -116,43 -117,41 +118,43 @@@
              {
                  Float owns = ownerships.get(endpoint);
                  List<HostStat> tokens = hostToTokens.get(endpoint);
-                 printNode(endpoint.getHostAddress(), owns, tokens, hasEffectiveOwns, isTokenPerNode, maxAddressLength);
-                 printNode(endpoint.getHostAddress(), owns, tokens, hasEffectiveOwns, isTokenPerNode, out);
++                printNode(endpoint.getHostAddress(), owns, tokens, hasEffectiveOwns, isTokenPerNode, maxAddressLength, out);
              }
          }

-         System.out.printf("%n" + errors);
-         out.printf("%n" + errors.toString());
++        out.printf("%n" + errors);
      }

-     private void findMaxAddressLength(Map<String, SetHostStat> dcs)
+     private int computeMaxAddressLength(Map<String, SetHostStat> dcs)
      {
-         maxAddressLength = 0;
-         for (Map.Entry<String, SetHostStat> dc : dcs.entrySet())
-         {
-             for (HostStat stat : dc.getValue())
-             {
-                 maxAddressLength = Math.max(maxAddressLength, stat.ipOrDns().length());
-             }
-         }
+         int maxAddressLength = 0;
+
+         Set<InetAddress> seenHosts = new HashSet<>();
+         for (SetHostStat stats : dcs.values())
+             for (HostStat stat : stats)
+                 if (seenHosts.add(stat.endpoint))
+                     maxAddressLength = Math.max(maxAddressLength, stat.ipOrDns().length());
+
+         return maxAddressLength;
      }

-     private void printNodesHeader(boolean hasEffectiveOwns, boolean isTokenPerNode, int maxAddressLength)
-     private void printNodesHeader(boolean hasEffectiveOwns, boolean isTokenPerNode, PrintStream out)
++    private void printNodesHeader(boolean hasEffectiveOwns, boolean isTokenPerNode, int maxAddressLength, PrintStream out)
      {
-         String fmt = getFormat(hasEffectiveOwns, isTokenPerNode);
+         String fmt = getFormat(hasEffectiveOwns, isTokenPerNode, maxAddressLength);
          String owns = hasEffectiveOwns ? "Owns (effective)" : "Owns";

          if (isTokenPerNode)
-             System.out.printf(fmt, "-", "-", "Address", "Load", owns, "Host ID", "Token", "Rack");
+             out.printf(fmt, "-", "-", "Address", "Load", owns, "Host ID", "Token", "Rack");
          else
-             System.out.printf(fmt, "-", "-", "Address", "Load", "Tokens", owns, "Host ID", "Rack");
+             out.printf(fmt, "-", "-", "Address", "Load", "Tokens", owns, "Host ID", "Rack");
      }

-     private void printNode(String endpoint, Float owns, List<HostStat> tokens, boolean hasEffectiveOwns, boolean isTokenPerNode, PrintStream out)
+     private void printNode(String endpoint, Float owns, List<HostStat> tokens, boolean hasEffectiveOwns,
-                            boolean isTokenPerNode, int maxAddressLength)
++                           boolean isTokenPerNode, int maxAddressLength, PrintStream out)
      {
          String status, state, load, strOwns, hostID, rack, fmt;
-         fmt = getFormat(hasEffectiveOwns, isTokenPerNode);
+         fmt = getFormat(hasEffectiveOwns, isTokenPerNode, maxAddressLength);

          if (liveNodes.contains(endpoint)) status = "U";
          else if (unreachableNodes.contains(endpoint)) status = "D";
          else status = "?";
@@@ -175,12 -174,14 +177,12 @@@
          String endpointDns = tokens.get(0).ipOrDns();
          if (isTokenPerNode)
-             System.out.printf(fmt, status, state, endpointDns, load, strOwns, hostID, tokens.get(0).token, rack);
+             out.printf(fmt, status, state, endpointDns, load, strOwns, hostID, tokens.get(0).token, rack);
          else
-             System.out.printf(fmt, status, state, endpointDns, load, tokens.size(), strOwns, hostID, rack);
+             out.printf(fmt, status, state, endpointDns, load, tokens.size(), strOwns, hostID, rack);
      }

-     private String getFormat(
-         boolean hasEffectiveOwns,
-         boolean isTokenPerNode)
+     private String getFormat(boolean hasEffectiveOwns, boolean isTokenPerNode, int maxAddressLength)
      {
          if (format == null)
          {

diff --cc src/java/org/apache/cassandra/tools/nodetool/StatusHandoff.java
index 65f6729,738cbeb..8dca732
--- a/src/java/org/apache/cassandra/tools/nodetool/StatusHandoff.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/StatusHandoff.java
@@@ -17,6 -17,6 +17,8 @@@
   */
  package org.apache.cassandra.tools.nodetool;

++import java.io.PrintStream;
++
  import io.airlift.command.Command;

  import org.apache.cassandra.tools.NodeProbe;

@@@ -28,12 -28,9 +30,13 @@@ public class StatusHandoff extends Node
      @Override
      public void execute(NodeProbe probe)
      {
-         System.out.println(String.format("Hinted handoff is %s",
-         probe.output().out.println(
++        PrintStream out = probe.output().out;
++        out.println(String.format("Hinted handoff is %s",
                                    probe.isHandoffEnabled()
                                    ? "running"
-                                   : "not running");
+                                   : "not running"));
+
+         for (String dc : probe.getHintedHandoffDisabledDCs())
-             System.out.println(String.format("Data center %s is disabled", dc));
++            out.println(String.format("Data center %s is disabled", dc));
      }
- }
+ }

diff --cc src/java/org/apache/cassandra/tools/nodetool/TableHistograms.java
index be3f799,d055209..04a0c97
--- a/src/java/org/apache/cassandra/tools/nodetool/TableHistograms.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/TableHistograms.java
@@@ -54,9 -58,9 +58,9 @@@ public class TableHistograms extends No
          double[] estimatedColumnCountPercentiles = new double[7];
          double[] offsetPercentiles = new double[]{0.5, 0.75, 0.95, 0.98, 0.99};

-         if (ArrayUtils.isEmpty(estimatedRowSize) || ArrayUtils.isEmpty(estimatedColumnCount))
+         if (ArrayUtils.isEmpty(estimatedPartitionSize) || ArrayUtils.isEmpty(estimatedColumnCount))
          {
-             System.err.println("No SSTables exists, unable to calculate 'Partition Size' and 'Cell Count' percentiles");
+             err.println("No SSTables exists, unable to calculate 'Partition Size' and 'Cell Count' percentiles");

              for (int i = 0; i < 7; i++)
              {
@@@ -66,12 -70,12 +70,12 @@@
          }
          else
          {
-             EstimatedHistogram rowSizeHist = new EstimatedHistogram(estimatedRowSize);
+             EstimatedHistogram partitionSizeHist = new EstimatedHistogram(estimatedPartitionSize);
              EstimatedHistogram columnCountHist = new EstimatedHistogram(estimatedColumnCount);

-             if (rowSizeHist.isOverflowed())
+             if (partitionSizeHist.isOverflowed())
              {
-                 System.err.println(String.format("Row sizes are larger than %s, unable to calculate percentiles", partitionSizeHist.getLargestBucketOffset()));
-                 err.println(String.format("Row sizes are larger than %s, unable to calculate percentiles", rowSizeHist.getLargestBucketOffset()));
++                err.println(String.format("Row sizes are larger than %s, unable to calculate percentiles", partitionSizeHist.getLargestBucketOffset()));
                  for (int i = 0; i < offsetPercentiles.length; i++)
                      estimatedRowSizePercentiles[i] = Double.NaN;
              }

diff --cc src/java/org/apache/cassandra/tools/nodetool/TableStats.java
index c7d0d30,5df0a09..ea46d6e
--- a/src/java/org/apache/cassandra/tools/nodetool/TableStats.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/TableStats.java
@@@ -179,57 -181,58 +181,57 @@@ public class TableStats extends NodeToo
                      throw e;
                  }

-                 System.out.println("\t\tSpace used (live): " + format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "LiveDiskSpaceUsed"), humanReadable));
-                 System.out.println("\t\tSpace used (total): " + format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "TotalDiskSpaceUsed"), humanReadable));
-                 System.out.println("\t\tSpace used by snapshots (total): " + format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "SnapshotsSize"), humanReadable));
+                 out.println("\t\tSpace used (live): " + format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "LiveDiskSpaceUsed"), humanReadable));
+                 out.println("\t\tSpace used (total): " + format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "TotalDiskSpaceUsed"), humanReadable));
+                 out.println("\t\tSpace used by snapshots (total): " + format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "SnapshotsSize"), humanReadable));
                  if (offHeapSize != null)
-                     System.out.println("\t\tOff heap memory used (total): " + format(offHeapSize, humanReadable));
-                 System.out.println("\t\tSSTable Compression Ratio: " + probe.getColumnFamilyMetric(keyspaceName, tableName, "CompressionRatio"));
+                     out.println("\t\tOff heap memory used (total): " + format(offHeapSize, humanReadable));
+                 out.println("\t\tSSTable Compression Ratio: " + probe.getColumnFamilyMetric(keyspaceName, tableName, "CompressionRatio"));

-                 Object estimatedRowCount = probe.getColumnFamilyMetric(keyspaceName, tableName, "EstimatedRowCount");
-                 if (Long.valueOf(-1L).equals(estimatedRowCount))
+                 Object estimatedPartitionCount = probe.getColumnFamilyMetric(keyspaceName, tableName, "EstimatedPartitionCount");
+                 if (Long.valueOf(-1L).equals(estimatedPartitionCount))
                  {
-                     estimatedRowCount = 0L;
+                     estimatedPartitionCount = 0L;
                  }

-                 System.out.println("\t\tNumber of partitions (estimate): " + estimatedPartitionCount);
-
-                 out.println("\t\tNumber of keys (estimate): " + estimatedRowCount);
++                out.println("\t\tNumber of partitions (estimate): " + estimatedPartitionCount);

-                 System.out.println("\t\tMemtable cell count: " + probe.getColumnFamilyMetric(keyspaceName, tableName, "MemtableColumnsCount"));
-                 System.out.println("\t\tMemtable data size: " + format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "MemtableLiveDataSize"), humanReadable));
+                 out.println("\t\tMemtable cell count: " + probe.getColumnFamilyMetric(keyspaceName, tableName, "MemtableColumnsCount"));
+                 out.println("\t\tMemtable data size: " + format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "MemtableLiveDataSize"), humanReadable));
                  if (memtableOffHeapSize != null)
-                     System.out.println("\t\tMemtable off heap memory used: " + format(memtableOffHeapSize, humanReadable));
-                 System.out.println("\t\tMemtable switch count: " + probe.getColumnFamilyMetric(keyspaceName, tableName, "MemtableSwitchCount"));
-                 System.out.println("\t\tLocal read count: " + ((CassandraMetricsRegistry.JmxTimerMBean) probe.getColumnFamilyMetric(keyspaceName, tableName, "ReadLatency")).getCount());
+                     out.println("\t\tMemtable off heap memory used: " + format(memtableOffHeapSize, humanReadable));
+                 out.println("\t\tMemtable switch count: " + probe.getColumnFamilyMetric(keyspaceName, tableName, "MemtableSwitchCount"));
+                 out.println("\t\tLocal read count: " + ((CassandraMetricsRegistry.JmxTimerMBean) probe.getColumnFamilyMetric(keyspaceName, tableName, "ReadLatency")).getCount());
                  double localReadLatency = ((CassandraMetricsRegistry.JmxTimerMBean) probe.getColumnFamilyMetric(keyspaceName, tableName, "ReadLatency")).getMean() / 1000;
                  double localRLatency = localReadLatency > 0 ? localReadLatency : Double.NaN;
-                 System.out.printf("\t\tLocal read latency: %01.3f ms%n", localRLatency);
-                 System.out.println("\t\tLocal write count: " + ((CassandraMetricsRegistry.JmxTimerMBean) probe.getColumnFamilyMetric(keyspaceName, tableName, "WriteLatency")).getCount());
+                 out.printf("\t\tLocal read latency: %01.3f ms%n", localRLatency);
+                 out.println("\t\tLocal write count: " + ((CassandraMetricsRegistry.JmxTimerMBean) probe.getColumnFamilyMetric(keyspaceName, tableName, "WriteLatency")).getCount());
                  double localWriteLatency = ((CassandraMetricsRegistry.JmxTimerMBean) probe.getColumnFamilyMetric(keyspaceName, tableName, "WriteLatency")).getMean() / 1000;
                  double localWLatency = localWriteLatency > 0 ? localWriteLatency : Double.NaN;
-                 System.out.printf("\t\tLocal write latency: %01.3f ms%n", localWLatency);
-                 System.out.println("\t\tPending flushes: " + probe.getColumnFamilyMetric(keyspaceName, tableName, "PendingFlushes"));
-                 System.out.println("\t\tBloom filter false positives: " + probe.getColumnFamilyMetric(keyspaceName, tableName, "BloomFilterFalsePositives"));
-                 System.out.printf("\t\tBloom filter false ratio: %s%n", String.format("%01.5f", probe.getColumnFamilyMetric(keyspaceName, tableName, "RecentBloomFilterFalseRatio")));
-                 System.out.println("\t\tBloom filter space used: " + format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "BloomFilterDiskSpaceUsed"), humanReadable));
+                 out.printf("\t\tLocal write latency: %01.3f ms%n", localWLatency);
+                 out.println("\t\tPending flushes: " + probe.getColumnFamilyMetric(keyspaceName, tableName, "PendingFlushes"));
+                 out.println("\t\tBloom filter false positives: " + probe.getColumnFamilyMetric(keyspaceName, tableName, "BloomFilterFalsePositives"));
+                 out.printf("\t\tBloom filter false ratio: %s%n", String.format("%01.5f", probe.getColumnFamilyMetric(keyspaceName, tableName, "RecentBloomFilterFalseRatio")));
+                 out.println("\t\tBloom filter space used: " + format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "BloomFilterDiskSpaceUsed"), humanReadable));
                  if (bloomFilterOffHeapSize != null)
-                     System.out.println("\t\tBloom filter off heap memory used: " + format(bloomFilterOffHeapSize, humanReadable));
+                     out.println("\t\tBloom filter off heap memory used: " + format(bloomFilterOffHeapSize, humanReadable));
                  if (indexSummaryOffHeapSize != null)
-                     System.out.println("\t\tIndex summary off heap memory used: " + format(indexSummaryOffHeapSize, humanReadable));
+                     out.println("\t\tIndex summary off heap memory used: " + format(indexSummaryOffHeapSize, humanReadable));
                  if (compressionMetadataOffHeapSize != null)
-                     System.out.println("\t\tCompression metadata off heap memory used: " + format(compressionMetadataOffHeapSize, humanReadable));
+                     out.println("\t\tCompression metadata off heap memory used: " + format(compressionMetadataOffHeapSize, humanReadable));

-                 System.out.println("\t\tCompacted partition minimum bytes: " + format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "MinPartitionSize"), humanReadable));
-                 System.out.println("\t\tCompacted partition maximum bytes: " + format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "MaxPartitionSize"), humanReadable));
-                 System.out.println("\t\tCompacted partition mean bytes: " + format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "MeanPartitionSize"), humanReadable));
-                 out.println("\t\tCompacted partition minimum bytes: " + format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "MinRowSize"), humanReadable));
-                 out.println("\t\tCompacted partition maximum bytes: " + format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "MaxRowSize"), humanReadable));
-                 out.println("\t\tCompacted partition mean bytes: " + format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "MeanRowSize"), humanReadable));
++                out.println("\t\tCompacted partition minimum bytes: " + format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "MinPartitionSize"), humanReadable));
++                out.println("\t\tCompacted partition maximum bytes: " + format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "MaxPartitionSize"), humanReadable));
++                out.println("\t\tCompacted partition mean bytes: " + format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "MeanPartitionSize"), humanReadable));
                  CassandraMetricsRegistry.JmxHistogramMBean histogram = (CassandraMetricsRegistry.JmxHistogramMBean) probe.getColumnFamilyMetric(keyspaceName, tableName, "LiveScannedHistogram");
-                 System.out.println("\t\tAverage live cells per slice (last five minutes): " + histogram.getMean());
-                 System.out.println("\t\tMaximum live cells per slice (last five minutes): " + histogram.getMax());
+                 out.println("\t\tAverage live cells per slice (last five minutes): " + histogram.getMean());
+                 out.println("\t\tMaximum live cells per slice (last five minutes): " + histogram.getMax());
                  histogram = (CassandraMetricsRegistry.JmxHistogramMBean) probe.getColumnFamilyMetric(keyspaceName, tableName, "TombstoneScannedHistogram");
-                 System.out.println("\t\tAverage tombstones per slice (last five minutes): " + histogram.getMean());
-                 System.out.println("\t\tMaximum tombstones per slice (last five minutes): " + histogram.getMax());
+                 out.println("\t\tAverage tombstones per slice (last five minutes): " + histogram.getMean());
+                 out.println("\t\tMaximum tombstones per slice (last five minutes): " + histogram.getMax());
-                 System.out.println("");
+                 out.println("");
              }
-             System.out.println("----------------");
+             out.println("----------------");
          }
      }

diff --cc src/java/org/apache/cassandra/tools/nodetool/TpStats.java
index 5d3eab7,abee213..c0e909a
--- a/src/java/org/apache/cassandra/tools/nodetool/TpStats.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/TpStats.java
@@@ -32,13 -33,14 +33,14 @@@ public class TpStats extends NodeToolCm
      @Override
      public void execute(NodeProbe probe)
      {
-         System.out.printf("%-25s%10s%10s%15s%10s%18s%n", "Pool Name", "Active", "Pending", "Completed", "Blocked", "All time blocked");
+         PrintStream out = probe.output().out;
+         out.printf("%-25s%10s%10s%15s%10s%18s%n", "Pool Name", "Active", "Pending", "Completed", "Blocked", "All time blocked");

-         Multimap<String, String> threadPools = probe.getThreadPools();
+         Multimap<String, String> threadPools = probe.getThreadPools();
          for (Map.Entry<String, String> tpool : threadPools.entries())
          {
-             System.out.printf("%-25s%10s%10s%15s%10s%18s%n",
+             out.printf("%-25s%10s%10s%15s%10s%18s%n",
                                tpool.getValue(),
                                probe.getThreadPoolMetric(tpool.getKey(), tpool.getValue(), "ActiveTasks"),
                                probe.getThreadPoolMetric(tpool.getKey(), tpool.getValue(), "PendingTasks"),

diff --cc src/java/org/apache/cassandra/tools/nodetool/UpgradeSSTable.java
index 82866e0,5623793..4d8ed94
--- a/src/java/org/apache/cassandra/tools/nodetool/UpgradeSSTable.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/UpgradeSSTable.java
@@@ -51,9 -51,8 +51,9 @@@ public class UpgradeSSTable extends Nod
      {
          try
          {
-             probe.upgradeSSTables(System.out, keyspace, !includeAll, jobs, tableNames);
-             probe.upgradeSSTables(probe.output().out, keyspace, !includeAll, jobs, cfnames);
-         } catch (Exception e)
++            probe.upgradeSSTables(probe.output().out, keyspace, !includeAll, jobs, tableNames);
+         }
+         catch (Exception e)
          {
              throw new RuntimeException("Error occurred during enabling auto-compaction", e);
          }

diff --cc src/java/org/apache/cassandra/tools/nodetool/Verify.java
index c449366,7a3ccd6..2a6789a
--- a/src/java/org/apache/cassandra/tools/nodetool/Verify.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Verify.java
@@@ -48,7 -48,7 +48,7 @@@ public class Verify extends NodeToolCm
      {
          try
          {
-             probe.verify(System.out, extendedVerify, keyspace, tableNames);
-             probe.verify(probe.output().out, extendedVerify, keyspace, cfnames);
++            probe.verify(probe.output().out, extendedVerify, keyspace, tableNames);
          } catch (Exception e)
          {
              throw new RuntimeException("Error occurred during verifying", e);
          }

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@cassandra.apache.org
For additional commands, e-mail: commits-h...@cassandra.apache.org
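Context for the nodetool changes above: the recurring edit across these files is the replacement of direct System.out/System.err calls with a pluggable Output handle on NodeProbe (setOutput/output() in NodeProbe.java, probe.output().out in each command), which is what lets the in-JVM dtest changes listed in the diffstat (Instance.java, NodeToolTest.java) capture command output. A minimal, hypothetical sketch of that shape — the Output class below is a simplified stand-in for the one added by this commit, and the command class is illustrative, not the real airlift-annotated nodetool command:

    import java.io.PrintStream;

    // Simplified stand-in for org.apache.cassandra.tools.Output: a pair of
    // streams that commands write to instead of the process-wide System.out/err.
    class Output
    {
        final PrintStream out;
        final PrintStream err;

        Output(PrintStream out, PrintStream err)
        {
            this.out = out;
            this.err = err;
        }
    }

    // Mirrors the NodeProbe accessors visible in the diff (setOutput / output),
    // defaulting to the process streams for normal CLI use.
    class ProbeSketch
    {
        private Output output = new Output(System.out, System.err);

        void setOutput(Output output) { this.output = output; }
        Output output()               { return output; }
    }

    // Illustrative command: because it prints through probe.output().out, a test
    // harness can inject its own PrintStream and assert on what was written.
    class CommandSketch
    {
        void execute(ProbeSketch probe)
        {
            probe.output().out.println("example command output");
        }
    }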