This is an automated email from the ASF dual-hosted git repository.
maciej pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iggy.git
The following commit(s) were added to refs/heads/master by this push:
new fcc5a447d feat(bench): prettier table output (#3090)
fcc5a447d is described below
commit fcc5a447dcc2cca8f8823168eaca9760f558fd09
Author: Tung To <[email protected]>
AuthorDate: Mon Apr 13 13:45:41 2026 +0700
feat(bench): prettier table output (#3090)
---
Cargo.lock | 3 +
Cargo.toml | 1 +
core/bench/Cargo.toml | 1 +
core/bench/report/Cargo.toml | 2 +
core/bench/report/src/prints.rs | 203 ++++++++++++++++--
core/bench/report/src/utils.rs | 9 +
.../src/actors/consumer/benchmark_consumer.rs | 222 +++++++++++++++++---
core/bench/src/actors/consumer/client/interface.rs | 1 +
.../actors/consumer/typed_benchmark_consumer.rs | 2 +
.../src/actors/producer/benchmark_producer.rs | 225 +++++++++++++++++---
core/bench/src/actors/producer/client/interface.rs | 1 +
.../actors/producer/typed_benchmark_producer.rs | 2 +
.../benchmark_producing_consumer.rs | 230 ++++++++++++++++++---
.../typed_benchmark_producing_consumer.rs | 3 +
core/bench/src/args/common.rs | 4 +
core/bench/src/benchmarks/common.rs | 6 +
core/bench/src/runner.rs | 3 +-
17 files changed, 830 insertions(+), 88 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index c7416b066..74d0febb2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1199,6 +1199,7 @@ version = "0.3.0"
dependencies = [
"charming",
"colored",
+ "comfy-table",
"derive-new",
"derive_more",
"human-repr",
@@ -1206,6 +1207,7 @@ dependencies = [
"serde",
"serde_json",
"sysinfo 0.38.4",
+ "terminal_size",
"tracing",
"uuid",
]
@@ -5508,6 +5510,7 @@ dependencies = [
"charming",
"chrono",
"clap",
+ "comfy-table",
"figlet-rs",
"futures-util",
"governor",
diff --git a/Cargo.toml b/Cargo.toml
index 320c827eb..5aa73608a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -272,6 +272,7 @@ strum_macros = "0.28.0"
syn = { version = "2", features = ["full", "extra-traits"] }
sysinfo = "0.38.4"
tempfile = "3.27.0"
+terminal_size = { version = "0.4.4" }
test-case = "3.3.1"
testcontainers-modules = { version = "0.15.0", features = ["postgres",
"http_wait"] }
thiserror = "2.0.18"
diff --git a/core/bench/Cargo.toml b/core/bench/Cargo.toml
index 8126ad97c..27daaa36e 100644
--- a/core/bench/Cargo.toml
+++ b/core/bench/Cargo.toml
@@ -37,6 +37,7 @@ bytes = { workspace = true }
charming = { workspace = true }
chrono = { workspace = true }
clap = { workspace = true }
+comfy-table = { workspace = true }
figlet-rs = { workspace = true }
futures-util = { workspace = true }
governor = { workspace = true }
diff --git a/core/bench/report/Cargo.toml b/core/bench/report/Cargo.toml
index 4dd08e95d..b82cb9b2f 100644
--- a/core/bench/report/Cargo.toml
+++ b/core/bench/report/Cargo.toml
@@ -25,6 +25,7 @@ license = "Apache-2.0"
[dependencies]
charming = { workspace = true }
colored = { workspace = true }
+comfy-table = { workspace = true }
derive-new = { workspace = true }
derive_more = { workspace = true }
human-repr = { workspace = true }
@@ -32,5 +33,6 @@ rand = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
sysinfo = { workspace = true }
+terminal_size = { workspace = true }
tracing = { workspace = true }
uuid = { workspace = true }
diff --git a/core/bench/report/src/prints.rs b/core/bench/report/src/prints.rs
index 045560ec3..b6fbb8ac4 100644
--- a/core/bench/report/src/prints.rs
+++ b/core/bench/report/src/prints.rs
@@ -16,17 +16,22 @@
* under the License.
*/
-use colored::{Color, ColoredString, Colorize};
+use colored::{Color, Colorize};
+use comfy_table::{Cell, Color as TableColor, ContentArrangement, Table,
presets::UTF8_FULL};
use human_repr::HumanCount;
use tracing::info;
use crate::{
- actor_kind::ActorKind, benchmark_kind::BenchmarkKind,
group_metrics::BenchmarkGroupMetrics,
- group_metrics_kind::GroupMetricsKind, report::BenchmarkReport,
+ actor_kind::ActorKind,
+ benchmark_kind::BenchmarkKind,
+ group_metrics::BenchmarkGroupMetrics,
+ group_metrics_kind::GroupMetricsKind,
+ report::BenchmarkReport,
+ utils::{WIDE_LAYOUT_THRESHOLD, get_terminal_width},
};
impl BenchmarkReport {
- pub fn print_summary(&self) {
+ pub fn print_summary(&self, pretty: bool) {
let kind = self.params.benchmark_kind;
let total_messages = format!("{} messages, ", self.total_messages());
let total_size = format!(
@@ -71,7 +76,7 @@ impl BenchmarkReport {
self.group_metrics
.iter()
- .for_each(|s| info!("{}\n", s.formatted_string()));
+ .for_each(|s| println!("\n{}", s.formatted_string(pretty)));
}
pub fn total_messages(&self) -> u64 {
@@ -136,23 +141,33 @@ impl BenchmarkReport {
}
impl BenchmarkGroupMetrics {
- pub fn formatted_string(&self) -> ColoredString {
+ pub fn formatted_string(&self, pretty: bool) -> String {
+ if pretty {
+ let width = get_terminal_width();
+ if width >= WIDE_LAYOUT_THRESHOLD {
+ self.format_wide_layout()
+ } else {
+ self.format_narrow_layout()
+ }
+ } else {
+ self.format_original()
+ }
+ }
+
+ fn format_original(&self) -> String {
let (prefix, color) = match self.summary.kind {
GroupMetricsKind::Producers => ("Producers Results", Color::Green),
GroupMetricsKind::Consumers => ("Consumers Results", Color::Green),
GroupMetricsKind::ProducersAndConsumers => ("Aggregate Results",
Color::Red),
GroupMetricsKind::ProducingConsumers => ("Producing Consumer
Results", Color::Red),
};
-
let actor = self.summary.kind.actor();
-
let total_mb = format!("{:.2}",
self.summary.total_throughput_megabytes_per_second);
let total_msg = format!("{:.0}",
self.summary.total_throughput_messages_per_second);
let avg_mb = format!(
"{:.2}",
self.summary.average_throughput_megabytes_per_second
);
-
let p50 = format!("{:.2}", self.summary.average_p50_latency_ms);
let p90 = format!("{:.2}", self.summary.average_p90_latency_ms);
let p95 = format!("{:.2}", self.summary.average_p95_latency_ms);
@@ -170,11 +185,169 @@ impl BenchmarkGroupMetrics {
);
format!(
- "{prefix}: Total throughput: {total_mb} MB/s, {total_msg}
messages/s, average throughput per {actor}: {avg_mb} MB/s, \
- p50 latency: {p50} ms, p90 latency: {p90} ms, p95 latency: {p95}
ms, \
- p99 latency: {p99} ms, p999 latency: {p999} ms, p9999 latency:
{p9999} ms, average latency: {avg} ms, \
- median latency: {median} ms, min: {min} ms, max: {max} ms, std
dev: {std_dev} ms, total time: {total_test_time} s"
- )
- .color(color)
+ "{prefix}: Total throughput: {total_mb} MB/s, {total_msg} messages/s,
average throughput per {actor}: {avg_mb} MB/s, \
+ p50 latency: {p50} ms, p90 latency: {p90} ms, p95 latency: {p95} ms, \
+ p99 latency: {p99} ms, p999 latency: {p999} ms, p9999 latency: {p9999}
ms, average latency: {avg} ms, \
+ median latency: {median} ms, min: {min} ms, max: {max} ms, std dev:
{std_dev} ms, total time: {total_test_time} s"
+ )
+ .color(color)
+ .to_string()
+ }
+
+ fn format_wide_layout(&self) -> String {
+ let (prefix, color) = match self.summary.kind {
+ GroupMetricsKind::Producers => ("Producers Results",
TableColor::Green),
+ GroupMetricsKind::Consumers => ("Consumers Results",
TableColor::Green),
+ GroupMetricsKind::ProducersAndConsumers => ("Aggregate Results",
TableColor::Red),
+ GroupMetricsKind::ProducingConsumers => ("Producing Consumer
Results", TableColor::Red),
+ };
+ let actor = self.summary.kind.actor();
+
+ let mut summary_table = Table::new();
+ summary_table
+ .load_preset(UTF8_FULL)
+ .set_content_arrangement(ContentArrangement::Dynamic);
+
+ summary_table.add_row(vec![
+ Cell::new(prefix).fg(color),
+ Cell::new(format!(
+ "{:.2} s",
+ self.avg_throughput_mb_ts.points.last().unwrap().time_s
+ ))
+ .fg(color),
+ Cell::new(format!(
+ "{:.2} MB/s",
+ self.summary.total_throughput_megabytes_per_second
+ ))
+ .fg(color),
+ Cell::new(format!(
+ "{:.0} msg/s",
+ self.summary.total_throughput_messages_per_second
+ ))
+ .fg(color),
+ Cell::new(format!(
+ "{:.2} MB/s per {}",
+ self.summary.average_throughput_megabytes_per_second, actor
+ ))
+ .fg(color),
+ ]);
+
+ let mut latency_table = Table::new();
+ latency_table
+ .load_preset(UTF8_FULL)
+ .set_content_arrangement(ContentArrangement::Dynamic);
+
+ latency_table.add_row(vec![
+ "Latency", "p50", "p90", "p95", "p99", "p999", "p9999", "avg",
"median", "min", "max",
+ "std dev",
+ ]);
+ latency_table.add_row(vec![
+ "(ms)".to_string(),
+ format!("{:.2}", self.summary.average_p50_latency_ms),
+ format!("{:.2}", self.summary.average_p90_latency_ms),
+ format!("{:.2}", self.summary.average_p95_latency_ms),
+ format!("{:.2}", self.summary.average_p99_latency_ms),
+ format!("{:.2}", self.summary.average_p999_latency_ms),
+ format!("{:.2}", self.summary.average_p9999_latency_ms),
+ format!("{:.2}", self.summary.average_latency_ms),
+ format!("{:.2}", self.summary.average_median_latency_ms),
+ format!("{:.2}", self.summary.min_latency_ms),
+ format!("{:.2}", self.summary.max_latency_ms),
+ format!("{:.2}", self.summary.std_dev_latency_ms),
+ ]);
+
+ format!("\n{}\n{}", summary_table, latency_table)
+ }
+
+ fn format_narrow_layout(&self) -> String {
+ let (prefix, color) = match self.summary.kind {
+ GroupMetricsKind::Producers => ("Producers Results",
TableColor::Green),
+ GroupMetricsKind::Consumers => ("Consumers Results",
TableColor::Green),
+ GroupMetricsKind::ProducersAndConsumers => ("Aggregate Results",
TableColor::Red),
+ GroupMetricsKind::ProducingConsumers => ("Producing Consumer
Results", TableColor::Red),
+ };
+ let actor = self.summary.kind.actor();
+
+ let mut table = Table::new();
+ table
+ .load_preset(UTF8_FULL)
+ .set_content_arrangement(ContentArrangement::Dynamic)
+ .set_width(60);
+
+ table.add_row(vec![Cell::new(prefix).fg(color),
Cell::new("").fg(color)]);
+
+ table.add_row(vec!["Summary", ""]);
+ table.add_row(vec![
+ "Total Time".to_string(),
+ format!(
+ "{:.2} s",
+ self.avg_throughput_mb_ts.points.last().unwrap().time_s
+ ),
+ ]);
+
+ table.add_row(vec!["Throughput", ""]);
+ table.add_row(vec![
+ "Total (MB/s)".to_string(),
+ format!("{:.2}",
self.summary.total_throughput_megabytes_per_second),
+ ]);
+ table.add_row(vec![
+ "Total (msg/s)".to_string(),
+ format!("{:.0}",
self.summary.total_throughput_messages_per_second),
+ ]);
+ table.add_row(vec![
+ format!("Avg per {} (MB/s)", actor),
+ format!(
+ "{:.2}",
+ self.summary.average_throughput_megabytes_per_second
+ ),
+ ]);
+
+ table.add_row(vec!["Latency", ""]);
+ table.add_row(vec![
+ "p50".to_string(),
+ format!("{:.2} ms", self.summary.average_p50_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p90".to_string(),
+ format!("{:.2} ms", self.summary.average_p90_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p95".to_string(),
+ format!("{:.2} ms", self.summary.average_p95_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p99".to_string(),
+ format!("{:.2} ms", self.summary.average_p99_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p999".to_string(),
+ format!("{:.2} ms", self.summary.average_p999_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p9999".to_string(),
+ format!("{:.2} ms", self.summary.average_p9999_latency_ms),
+ ]);
+ table.add_row(vec![
+ "avg".to_string(),
+ format!("{:.2} ms", self.summary.average_latency_ms),
+ ]);
+ table.add_row(vec![
+ "median".to_string(),
+ format!("{:.2} ms", self.summary.average_median_latency_ms),
+ ]);
+ table.add_row(vec![
+ "min".to_string(),
+ format!("{:.2} ms", self.summary.min_latency_ms),
+ ]);
+ table.add_row(vec![
+ "max".to_string(),
+ format!("{:.2} ms", self.summary.max_latency_ms),
+ ]);
+ table.add_row(vec![
+ "std dev".to_string(),
+ format!("{:.2} ms", self.summary.std_dev_latency_ms),
+ ]);
+
+ format!("\n{}", table)
}
}
diff --git a/core/bench/report/src/utils.rs b/core/bench/report/src/utils.rs
index b4d0abcc7..605827207 100644
--- a/core/bench/report/src/utils.rs
+++ b/core/bench/report/src/utils.rs
@@ -18,6 +18,15 @@
use crate::time_series::{TimePoint, TimeSeries};
use serde::Serializer;
+use terminal_size::{Width, terminal_size};
+
+/// Terminal width threshold for switching between narrow and wide table
layouts
+pub const WIDE_LAYOUT_THRESHOLD: u16 = 140;
+
+/// Returns current terminal width in columns
+pub fn get_terminal_width() -> u16 {
+ terminal_size().map(|(Width(w), _)| w).unwrap_or(80)
+}
pub(crate) fn round_float<S>(value: &f64, serializer: S) -> Result<S::Ok,
S::Error>
where
diff --git a/core/bench/src/actors/consumer/benchmark_consumer.rs
b/core/bench/src/actors/consumer/benchmark_consumer.rs
index 6181fc49b..5efcfd2c1 100644
--- a/core/bench/src/actors/consumer/benchmark_consumer.rs
+++ b/core/bench/src/actors/consumer/benchmark_consumer.rs
@@ -26,6 +26,9 @@ use bench_report::actor_kind::ActorKind;
use bench_report::benchmark_kind::BenchmarkKind;
use bench_report::individual_metrics::BenchmarkIndividualMetrics;
use bench_report::numeric_parameter::BenchmarkNumericParameter;
+use bench_report::utils::{WIDE_LAYOUT_THRESHOLD, get_terminal_width};
+use comfy_table::presets::UTF8_FULL;
+use comfy_table::{ContentArrangement, Table};
use human_repr::HumanCount;
use iggy::prelude::*;
use std::sync::Arc;
@@ -139,6 +142,7 @@ impl<C: BenchmarkConsumerClient> BenchmarkConsumer<C> {
u32::try_from(batches_processed).unwrap_or(u32::MAX),
&self.config.messages_per_batch,
&metrics,
+ self.config.pretty,
);
Ok(metrics)
@@ -185,29 +189,201 @@ impl<C: BenchmarkConsumerClient> BenchmarkConsumer<C> {
message_batches: u32,
messages_per_batch: &BenchmarkNumericParameter,
metrics: &BenchmarkIndividualMetrics,
+ pretty: bool,
) {
- info!(
- "Consumer #{} → polled {} messages, {} batches of {} messages in
{:.2} s, total size: {}, average throughput: {:.2} MB/s, \
- p50 latency: {:.2} ms, p90 latency: {:.2} ms, p95 latency: {:.2} ms, p99
latency: {:.2} ms, p999 latency: {:.2} ms, \
- p9999 latency: {:.2} ms, average latency: {:.2} ms, median latency: {:.2}
ms, min latency: {:.2} ms, max latency: {:.2} ms, std dev latency: {:.2} ms",
- consumer_id,
- total_messages.human_count_bare(),
- message_batches.human_count_bare(),
- messages_per_batch,
- metrics.summary.total_time_secs,
- IggyByteSize::from(metrics.summary.total_user_data_bytes),
- metrics.summary.throughput_megabytes_per_second,
- metrics.summary.p50_latency_ms,
- metrics.summary.p90_latency_ms,
- metrics.summary.p95_latency_ms,
- metrics.summary.p99_latency_ms,
- metrics.summary.p999_latency_ms,
- metrics.summary.p9999_latency_ms,
- metrics.summary.avg_latency_ms,
- metrics.summary.median_latency_ms,
- metrics.summary.min_latency_ms,
- metrics.summary.max_latency_ms,
- metrics.summary.std_dev_latency_ms,
- );
+ if pretty {
+ let width = get_terminal_width();
+
+ if width >= WIDE_LAYOUT_THRESHOLD {
+ Self::print_wide_layout(
+ consumer_id,
+ total_messages,
+ message_batches,
+ messages_per_batch,
+ metrics,
+ );
+ } else {
+ Self::print_narrow_layout(
+ consumer_id,
+ total_messages,
+ message_batches,
+ messages_per_batch,
+ metrics,
+ );
+ }
+ } else {
+ info!(
+ "Consumer #{} → polled {} messages, {} batches of {} messages
in {:.2} s, total size: {}, average throughput: {:.2} MB/s, \
+ p50 latency: {:.2} ms, p90 latency: {:.2} ms, p95 latency: {:.2}
ms, p99 latency: {:.2} ms, p999 latency: {:.2} ms, \
+ p9999 latency: {:.2} ms, average latency: {:.2} ms, median
latency: {:.2} ms, min latency: {:.2} ms, max latency: {:.2} ms, std dev
latency: {:.2} ms",
+ consumer_id,
+ total_messages.human_count_bare(),
+ message_batches.human_count_bare(),
+ messages_per_batch,
+ metrics.summary.total_time_secs,
+ IggyByteSize::from(metrics.summary.total_user_data_bytes),
+ metrics.summary.throughput_megabytes_per_second,
+ metrics.summary.p50_latency_ms,
+ metrics.summary.p90_latency_ms,
+ metrics.summary.p95_latency_ms,
+ metrics.summary.p99_latency_ms,
+ metrics.summary.p999_latency_ms,
+ metrics.summary.p9999_latency_ms,
+ metrics.summary.avg_latency_ms,
+ metrics.summary.median_latency_ms,
+ metrics.summary.min_latency_ms,
+ metrics.summary.max_latency_ms,
+ metrics.summary.std_dev_latency_ms,
+ );
+ }
+ }
+
+ fn print_wide_layout(
+ consumer_id: u32,
+ total_messages: u64,
+ message_batches: u32,
+ messages_per_batch: &BenchmarkNumericParameter,
+ metrics: &BenchmarkIndividualMetrics,
+ ) {
+ let mut summary_table = Table::new();
+ summary_table
+ .load_preset(UTF8_FULL)
+ .set_content_arrangement(ContentArrangement::Dynamic);
+
+ summary_table.add_row(vec![
+ format!("Consumer #{}", consumer_id),
+ format!("{} msgs", total_messages.human_count_bare()),
+ format!(
+ "{} x {}",
+ message_batches.human_count_bare(),
+ messages_per_batch
+ ),
+
IggyByteSize::from(metrics.summary.total_user_data_bytes).to_string(),
+ format!("{:.2} s", metrics.summary.total_time_secs),
+ format!(
+ "{:.2} MB/s",
+ metrics.summary.throughput_megabytes_per_second
+ ),
+ ]);
+
+ println!("\n{summary_table}");
+
+ let mut latency_table = Table::new();
+ latency_table
+ .load_preset(UTF8_FULL)
+ .set_content_arrangement(ContentArrangement::Dynamic);
+
+ latency_table.add_row(vec![
+ "Latency", "p50", "p90", "p95", "p99", "p999", "p9999", "avg",
"median", "min", "max",
+ "std dev",
+ ]);
+ latency_table.add_row(vec![
+ "(ms)".to_string(),
+ format!("{:.2}", metrics.summary.p50_latency_ms),
+ format!("{:.2}", metrics.summary.p90_latency_ms),
+ format!("{:.2}", metrics.summary.p95_latency_ms),
+ format!("{:.2}", metrics.summary.p99_latency_ms),
+ format!("{:.2}", metrics.summary.p999_latency_ms),
+ format!("{:.2}", metrics.summary.p9999_latency_ms),
+ format!("{:.2}", metrics.summary.avg_latency_ms),
+ format!("{:.2}", metrics.summary.median_latency_ms),
+ format!("{:.2}", metrics.summary.min_latency_ms),
+ format!("{:.2}", metrics.summary.max_latency_ms),
+ format!("{:.2}", metrics.summary.std_dev_latency_ms),
+ ]);
+
+ println!("{latency_table}");
+ }
+
+ fn print_narrow_layout(
+ consumer_id: u32,
+ total_messages: u64,
+ message_batches: u32,
+ messages_per_batch: &BenchmarkNumericParameter,
+ metrics: &BenchmarkIndividualMetrics,
+ ) {
+ let mut table = Table::new();
+ table
+ .load_preset(UTF8_FULL)
+ .set_content_arrangement(ContentArrangement::Dynamic)
+ .set_width(60);
+
+ table.add_row(vec![format!("Consumer #{}", consumer_id),
String::new()]);
+
+ table.add_row(vec!["Summary", ""]);
+ table.add_row(vec![
+ "Messages (polled)".to_string(),
+ total_messages.human_count_bare().to_string(),
+ ]);
+ table.add_row(vec![
+ "Batches".to_string(),
+ format!(
+ "{} x {}",
+ message_batches.human_count_bare(),
+ messages_per_batch
+ ),
+ ]);
+ table.add_row(vec![
+ "Total Size".to_string(),
+
IggyByteSize::from(metrics.summary.total_user_data_bytes).to_string(),
+ ]);
+ table.add_row(vec![
+ "Duration".to_string(),
+ format!("{:.2} s", metrics.summary.total_time_secs),
+ ]);
+
+ table.add_row(vec!["Throughput", ""]);
+ table.add_row(vec![
+ "MB/s".to_string(),
+ format!("{:.2}", metrics.summary.throughput_megabytes_per_second),
+ ]);
+
+ table.add_row(vec!["Latency", ""]);
+ table.add_row(vec![
+ "p50".to_string(),
+ format!("{:.2} ms", metrics.summary.p50_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p90".to_string(),
+ format!("{:.2} ms", metrics.summary.p90_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p95".to_string(),
+ format!("{:.2} ms", metrics.summary.p95_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p99".to_string(),
+ format!("{:.2} ms", metrics.summary.p99_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p999".to_string(),
+ format!("{:.2} ms", metrics.summary.p999_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p9999".to_string(),
+ format!("{:.2} ms", metrics.summary.p9999_latency_ms),
+ ]);
+ table.add_row(vec![
+ "avg".to_string(),
+ format!("{:.2} ms", metrics.summary.avg_latency_ms),
+ ]);
+ table.add_row(vec![
+ "median".to_string(),
+ format!("{:.2} ms", metrics.summary.median_latency_ms),
+ ]);
+ table.add_row(vec![
+ "min".to_string(),
+ format!("{:.2} ms", metrics.summary.min_latency_ms),
+ ]);
+ table.add_row(vec![
+ "max".to_string(),
+ format!("{:.2} ms", metrics.summary.max_latency_ms),
+ ]);
+ table.add_row(vec![
+ "std dev".to_string(),
+ format!("{:.2} ms", metrics.summary.std_dev_latency_ms),
+ ]);
+
+ println!("\n{table}");
}
}
diff --git a/core/bench/src/actors/consumer/client/interface.rs
b/core/bench/src/actors/consumer/client/interface.rs
index 2dd004512..e82eb2149 100644
--- a/core/bench/src/actors/consumer/client/interface.rs
+++ b/core/bench/src/actors/consumer/client/interface.rs
@@ -30,6 +30,7 @@ pub struct BenchmarkConsumerConfig {
pub warmup_time: IggyDuration,
pub polling_kind: PollingKind,
pub origin_timestamp_latency_calculation: bool,
+ pub pretty: bool,
}
pub trait ConsumerClient: Send + Sync {
diff --git a/core/bench/src/actors/consumer/typed_benchmark_consumer.rs
b/core/bench/src/actors/consumer/typed_benchmark_consumer.rs
index 4455e6895..55ea065bd 100644
--- a/core/bench/src/actors/consumer/typed_benchmark_consumer.rs
+++ b/core/bench/src/actors/consumer/typed_benchmark_consumer.rs
@@ -57,6 +57,7 @@ impl TypedBenchmarkConsumer {
polling_kind: PollingKind,
limit_bytes_per_second: Option<IggyByteSize>,
origin_timestamp_latency_calculation: bool,
+ pretty: bool,
) -> Self {
let config = BenchmarkConsumerConfig {
consumer_id,
@@ -66,6 +67,7 @@ impl TypedBenchmarkConsumer {
warmup_time,
polling_kind,
origin_timestamp_latency_calculation,
+ pretty,
};
if use_high_level_api {
diff --git a/core/bench/src/actors/producer/benchmark_producer.rs
b/core/bench/src/actors/producer/benchmark_producer.rs
index 9c9f2a555..1f1ab96ab 100644
--- a/core/bench/src/actors/producer/benchmark_producer.rs
+++ b/core/bench/src/actors/producer/benchmark_producer.rs
@@ -24,10 +24,14 @@ use crate::{
rate_limiter::BenchmarkRateLimiter,
},
};
-use bench_report::actor_kind::ActorKind;
use bench_report::benchmark_kind::BenchmarkKind;
use bench_report::individual_metrics::BenchmarkIndividualMetrics;
use bench_report::numeric_parameter::BenchmarkNumericParameter;
+use bench_report::{
+ actor_kind::ActorKind,
+ utils::{WIDE_LAYOUT_THRESHOLD, get_terminal_width},
+};
+use comfy_table::{ContentArrangement, Table, presets::UTF8_FULL};
use human_repr::HumanCount;
use iggy::prelude::*;
use std::{sync::Arc, time::Duration};
@@ -164,6 +168,7 @@ impl<P: BenchmarkProducerClient> BenchmarkProducer<P> {
batches_processed,
&self.config.messages_per_batch,
&metrics,
+ self.config.pretty,
);
Ok(metrics)
@@ -194,29 +199,201 @@ impl<P: BenchmarkProducerClient> BenchmarkProducer<P> {
message_batches: u64,
messages_per_batch: &BenchmarkNumericParameter,
metrics: &BenchmarkIndividualMetrics,
+ pretty: bool,
) {
- info!(
- "Producer #{} → sent {} messages in {} batches of {} messages in
{:.2} s, total size: {}, average throughput: {:.2} MB/s, \
- p50 latency: {:.2} ms, p90 latency: {:.2} ms, p95 latency: {:.2} ms, p99
latency: {:.2} ms, p999 latency: {:.2} ms, p9999 latency: {:.2} ms, \
- average latency: {:.2} ms, median latency: {:.2} ms, min latency: {:.2}
ms, max latency: {:.2} ms, std dev latency: {:.2} ms",
- producer_id,
- total_messages.human_count_bare(),
- message_batches.human_count_bare(),
- messages_per_batch,
- metrics.summary.total_time_secs,
- IggyByteSize::from(metrics.summary.total_user_data_bytes),
- metrics.summary.throughput_megabytes_per_second,
- metrics.summary.p50_latency_ms,
- metrics.summary.p90_latency_ms,
- metrics.summary.p95_latency_ms,
- metrics.summary.p99_latency_ms,
- metrics.summary.p999_latency_ms,
- metrics.summary.p9999_latency_ms,
- metrics.summary.avg_latency_ms,
- metrics.summary.median_latency_ms,
- metrics.summary.min_latency_ms,
- metrics.summary.max_latency_ms,
- metrics.summary.std_dev_latency_ms,
- );
+ if pretty {
+ let width = get_terminal_width();
+
+ if width >= WIDE_LAYOUT_THRESHOLD {
+ Self::print_wide_layout(
+ producer_id,
+ total_messages,
+ message_batches,
+ messages_per_batch,
+ metrics,
+ );
+ } else {
+ Self::print_narrow_layout(
+ producer_id,
+ total_messages,
+ message_batches,
+ messages_per_batch,
+ metrics,
+ );
+ }
+ } else {
+ info!(
+ "Producer #{} → sent {} messages in {} batches of {} messages
in {:.2} s, total size: {}, average throughput: {:.2} MB/s, \
+ p50 latency: {:.2} ms, p90 latency: {:.2} ms, p95 latency: {:.2}
ms, p99 latency: {:.2} ms, p999 latency: {:.2} ms, p9999 latency: {:.2} ms, \
+ average latency: {:.2} ms, median latency: {:.2} ms, min latency:
{:.2} ms, max latency: {:.2} ms, std dev latency: {:.2} ms",
+ producer_id,
+ total_messages.human_count_bare(),
+ message_batches.human_count_bare(),
+ messages_per_batch,
+ metrics.summary.total_time_secs,
+ IggyByteSize::from(metrics.summary.total_user_data_bytes),
+ metrics.summary.throughput_megabytes_per_second,
+ metrics.summary.p50_latency_ms,
+ metrics.summary.p90_latency_ms,
+ metrics.summary.p95_latency_ms,
+ metrics.summary.p99_latency_ms,
+ metrics.summary.p999_latency_ms,
+ metrics.summary.p9999_latency_ms,
+ metrics.summary.avg_latency_ms,
+ metrics.summary.median_latency_ms,
+ metrics.summary.min_latency_ms,
+ metrics.summary.max_latency_ms,
+ metrics.summary.std_dev_latency_ms,
+ );
+ }
+ }
+
+ fn print_wide_layout(
+ producer_id: u32,
+ total_messages: u64,
+ message_batches: u64,
+ messages_per_batch: &BenchmarkNumericParameter,
+ metrics: &BenchmarkIndividualMetrics,
+ ) {
+ let mut summary_table = Table::new();
+ summary_table
+ .load_preset(UTF8_FULL)
+ .set_content_arrangement(ContentArrangement::Dynamic);
+
+ summary_table.add_row(vec![
+ format!("Producer #{}", producer_id),
+ format!("{} msgs", total_messages.human_count_bare()),
+ format!(
+ "{} x {}",
+ message_batches.human_count_bare(),
+ messages_per_batch
+ ),
+
IggyByteSize::from(metrics.summary.total_user_data_bytes).to_string(),
+ format!("{:.2} s", metrics.summary.total_time_secs),
+ format!(
+ "{:.2} MB/s",
+ metrics.summary.throughput_megabytes_per_second
+ ),
+ ]);
+
+ println!("\n{summary_table}");
+
+ let mut latency_table = Table::new();
+ latency_table
+ .load_preset(UTF8_FULL)
+ .set_content_arrangement(ContentArrangement::Dynamic);
+
+ latency_table.add_row(vec![
+ "Latency", "p50", "p90", "p95", "p99", "p999", "p9999", "avg",
"median", "min", "max",
+ "std dev",
+ ]);
+ latency_table.add_row(vec![
+ "(ms)".to_string(),
+ format!("{:.2}", metrics.summary.p50_latency_ms),
+ format!("{:.2}", metrics.summary.p90_latency_ms),
+ format!("{:.2}", metrics.summary.p95_latency_ms),
+ format!("{:.2}", metrics.summary.p99_latency_ms),
+ format!("{:.2}", metrics.summary.p999_latency_ms),
+ format!("{:.2}", metrics.summary.p9999_latency_ms),
+ format!("{:.2}", metrics.summary.avg_latency_ms),
+ format!("{:.2}", metrics.summary.median_latency_ms),
+ format!("{:.2}", metrics.summary.min_latency_ms),
+ format!("{:.2}", metrics.summary.max_latency_ms),
+ format!("{:.2}", metrics.summary.std_dev_latency_ms),
+ ]);
+
+ println!("{latency_table}");
+ }
+
+ fn print_narrow_layout(
+ producer_id: u32,
+ total_messages: u64,
+ message_batches: u64,
+ messages_per_batch: &BenchmarkNumericParameter,
+ metrics: &BenchmarkIndividualMetrics,
+ ) {
+ let mut table = Table::new();
+ table
+ .load_preset(UTF8_FULL)
+ .set_content_arrangement(ContentArrangement::Dynamic)
+ .set_width(60);
+
+ table.add_row(vec![format!("Producer #{}", producer_id),
String::new()]);
+
+ table.add_row(vec!["Summary", ""]);
+ table.add_row(vec![
+ "Messages".to_string(),
+ total_messages.human_count_bare().to_string(),
+ ]);
+ table.add_row(vec![
+ "Batches".to_string(),
+ format!(
+ "{} x {}",
+ message_batches.human_count_bare(),
+ messages_per_batch
+ ),
+ ]);
+ table.add_row(vec![
+ "Total Size".to_string(),
+
IggyByteSize::from(metrics.summary.total_user_data_bytes).to_string(),
+ ]);
+ table.add_row(vec![
+ "Duration".to_string(),
+ format!("{:.2} s", metrics.summary.total_time_secs),
+ ]);
+
+ table.add_row(vec!["Throughput", ""]);
+ table.add_row(vec![
+ "MB/s".to_string(),
+ format!("{:.2}", metrics.summary.throughput_megabytes_per_second),
+ ]);
+
+ table.add_row(vec!["Latency", ""]);
+ table.add_row(vec![
+ "p50".to_string(),
+ format!("{:.2} ms", metrics.summary.p50_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p90".to_string(),
+ format!("{:.2} ms", metrics.summary.p90_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p95".to_string(),
+ format!("{:.2} ms", metrics.summary.p95_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p99".to_string(),
+ format!("{:.2} ms", metrics.summary.p99_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p999".to_string(),
+ format!("{:.2} ms", metrics.summary.p999_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p9999".to_string(),
+ format!("{:.2} ms", metrics.summary.p9999_latency_ms),
+ ]);
+ table.add_row(vec![
+ "avg".to_string(),
+ format!("{:.2} ms", metrics.summary.avg_latency_ms),
+ ]);
+ table.add_row(vec![
+ "median".to_string(),
+ format!("{:.2} ms", metrics.summary.median_latency_ms),
+ ]);
+ table.add_row(vec![
+ "min".to_string(),
+ format!("{:.2} ms", metrics.summary.min_latency_ms),
+ ]);
+ table.add_row(vec![
+ "max".to_string(),
+ format!("{:.2} ms", metrics.summary.max_latency_ms),
+ ]);
+ table.add_row(vec![
+ "std dev".to_string(),
+ format!("{:.2} ms", metrics.summary.std_dev_latency_ms),
+ ]);
+
+ println!("\n{table}");
}
}
diff --git a/core/bench/src/actors/producer/client/interface.rs
b/core/bench/src/actors/producer/client/interface.rs
index d48f7ec31..540a7f1ca 100644
--- a/core/bench/src/actors/producer/client/interface.rs
+++ b/core/bench/src/actors/producer/client/interface.rs
@@ -31,6 +31,7 @@ pub struct BenchmarkProducerConfig {
pub messages_per_batch: BenchmarkNumericParameter,
pub message_size: BenchmarkNumericParameter,
pub warmup_time: IggyDuration,
+ pub pretty: bool,
}
pub trait ProducerClient: Send + Sync {
diff --git a/core/bench/src/actors/producer/typed_benchmark_producer.rs
b/core/bench/src/actors/producer/typed_benchmark_producer.rs
index 32655ee83..af1ce26e2 100644
--- a/core/bench/src/actors/producer/typed_benchmark_producer.rs
+++ b/core/bench/src/actors/producer/typed_benchmark_producer.rs
@@ -55,6 +55,7 @@ impl TypedBenchmarkProducer {
sampling_time: IggyDuration,
moving_average_window: u32,
limit_bytes_per_second: Option<IggyByteSize>,
+ pretty: bool,
) -> Self {
let config = BenchmarkProducerConfig {
producer_id,
@@ -63,6 +64,7 @@ impl TypedBenchmarkProducer {
messages_per_batch,
message_size,
warmup_time,
+ pretty,
};
if use_high_level_api {
diff --git
a/core/bench/src/actors/producing_consumer/benchmark_producing_consumer.rs
b/core/bench/src/actors/producing_consumer/benchmark_producing_consumer.rs
index a676d449a..ace02e2fc 100644
--- a/core/bench/src/actors/producing_consumer/benchmark_producing_consumer.rs
+++ b/core/bench/src/actors/producing_consumer/benchmark_producing_consumer.rs
@@ -30,9 +30,13 @@ use crate::{
},
};
use bench_report::{
- actor_kind::ActorKind, benchmark_kind::BenchmarkKind,
- individual_metrics::BenchmarkIndividualMetrics,
numeric_parameter::BenchmarkNumericParameter,
+ actor_kind::ActorKind,
+ benchmark_kind::BenchmarkKind,
+ individual_metrics::BenchmarkIndividualMetrics,
+ numeric_parameter::BenchmarkNumericParameter,
+ utils::{WIDE_LAYOUT_THRESHOLD, get_terminal_width},
};
+use comfy_table::{ContentArrangement, Table, presets::UTF8_FULL};
use human_repr::HumanCount;
use iggy::prelude::*;
use tokio::time::Instant;
@@ -222,6 +226,7 @@ where
recv_batches,
&self.producer_config.messages_per_batch,
&metrics,
+ self.producer_config.pretty,
);
Ok(metrics)
}
@@ -271,29 +276,204 @@ where
total_batches: u64,
messages_per_batch: &BenchmarkNumericParameter,
metrics: &BenchmarkIndividualMetrics,
+ pretty: bool,
) {
- info!(
- "ProducingConsumer #{} → sent and received {} messages in {} batches of {} messages in {:.2} s, total size: {}, average throughput: {:.2} MB/s, \
- p50 latency: {:.2} ms, p90 latency: {:.2} ms, p95 latency: {:.2} ms, p99 latency: {:.2} ms, p999 latency: {:.2} ms, p9999 latency: {:.2} ms, \
- average latency: {:.2} ms, median latency: {:.2} ms, min latency: {:.2} ms, max latency: {:.2} ms, std dev latency: {:.2} ms",
- actor_id,
- total_messages.human_count_bare(),
- total_batches.human_count_bare(),
- messages_per_batch,
- metrics.summary.total_time_secs,
- IggyByteSize::from(metrics.summary.total_user_data_bytes),
- metrics.summary.throughput_megabytes_per_second,
- metrics.summary.p50_latency_ms,
- metrics.summary.p90_latency_ms,
- metrics.summary.p95_latency_ms,
- metrics.summary.p99_latency_ms,
- metrics.summary.p999_latency_ms,
- metrics.summary.p9999_latency_ms,
- metrics.summary.avg_latency_ms,
- metrics.summary.median_latency_ms,
- metrics.summary.min_latency_ms,
- metrics.summary.max_latency_ms,
- metrics.summary.std_dev_latency_ms,
- );
+ if pretty {
+ let width = get_terminal_width();
+
+ if width >= WIDE_LAYOUT_THRESHOLD {
+ Self::print_wide_layout(
+ actor_id,
+ total_messages,
+ total_batches,
+ messages_per_batch,
+ metrics,
+ );
+ } else {
+ Self::print_narrow_layout(
+ actor_id,
+ total_messages,
+ total_batches,
+ messages_per_batch,
+ metrics,
+ );
+ }
+ } else {
+ info!(
+ "ProducingConsumer #{} → sent and received {} messages in {} batches of {} messages in {:.2} s, total size: {}, average throughput: {:.2} MB/s, \
+ p50 latency: {:.2} ms, p90 latency: {:.2} ms, p95 latency: {:.2} ms, p99 latency: {:.2} ms, p999 latency: {:.2} ms, p9999 latency: {:.2} ms, \
+ average latency: {:.2} ms, median latency: {:.2} ms, min latency: {:.2} ms, max latency: {:.2} ms, std dev latency: {:.2} ms",
+ actor_id,
+ total_messages.human_count_bare(),
+ total_batches.human_count_bare(),
+ messages_per_batch,
+ metrics.summary.total_time_secs,
+ IggyByteSize::from(metrics.summary.total_user_data_bytes),
+ metrics.summary.throughput_megabytes_per_second,
+ metrics.summary.p50_latency_ms,
+ metrics.summary.p90_latency_ms,
+ metrics.summary.p95_latency_ms,
+ metrics.summary.p99_latency_ms,
+ metrics.summary.p999_latency_ms,
+ metrics.summary.p9999_latency_ms,
+ metrics.summary.avg_latency_ms,
+ metrics.summary.median_latency_ms,
+ metrics.summary.min_latency_ms,
+ metrics.summary.max_latency_ms,
+ metrics.summary.std_dev_latency_ms,
+ );
+ }
+ }
+
+ fn print_wide_layout(
+ actor_id: u32,
+ total_messages: u64,
+ total_batches: u64,
+ messages_per_batch: &BenchmarkNumericParameter,
+ metrics: &BenchmarkIndividualMetrics,
+ ) {
+ let mut summary_table = Table::new();
+ summary_table
+ .load_preset(UTF8_FULL)
+ .set_content_arrangement(ContentArrangement::Dynamic);
+
+ summary_table.add_row(vec![
+ format!("ProducingConsumer #{}", actor_id),
format!("{} msgs (sent & recv)", total_messages.human_count_bare()),
+ format!(
+ "{} x {}",
+ total_batches.human_count_bare(),
+ messages_per_batch
+ ),
+ IggyByteSize::from(metrics.summary.total_user_data_bytes).to_string(),
+ format!("{:.2} s", metrics.summary.total_time_secs),
+ format!(
+ "{:.2} MB/s",
+ metrics.summary.throughput_megabytes_per_second
+ ),
+ ]);
+
+ println!("\n{summary_table}");
+
+ let mut latency_table = Table::new();
+ latency_table
+ .load_preset(UTF8_FULL)
+ .set_content_arrangement(ContentArrangement::Dynamic);
+
+ latency_table.add_row(vec![
"Latency", "p50", "p90", "p95", "p99", "p999", "p9999", "avg", "median", "min", "max",
+ "std dev",
+ ]);
+ latency_table.add_row(vec![
+ "(ms)".to_string(),
+ format!("{:.2}", metrics.summary.p50_latency_ms),
+ format!("{:.2}", metrics.summary.p90_latency_ms),
+ format!("{:.2}", metrics.summary.p95_latency_ms),
+ format!("{:.2}", metrics.summary.p99_latency_ms),
+ format!("{:.2}", metrics.summary.p999_latency_ms),
+ format!("{:.2}", metrics.summary.p9999_latency_ms),
+ format!("{:.2}", metrics.summary.avg_latency_ms),
+ format!("{:.2}", metrics.summary.median_latency_ms),
+ format!("{:.2}", metrics.summary.min_latency_ms),
+ format!("{:.2}", metrics.summary.max_latency_ms),
+ format!("{:.2}", metrics.summary.std_dev_latency_ms),
+ ]);
+
+ println!("{latency_table}");
+ }
+
+ fn print_narrow_layout(
+ actor_id: u32,
+ total_messages: u64,
+ total_batches: u64,
+ messages_per_batch: &BenchmarkNumericParameter,
+ metrics: &BenchmarkIndividualMetrics,
+ ) {
+ let mut table = Table::new();
+ table
+ .load_preset(UTF8_FULL)
+ .set_content_arrangement(ContentArrangement::Dynamic)
+ .set_width(60);
+
+ table.add_row(vec![
+ format!("ProducingConsumer #{}", actor_id),
+ String::new(),
+ ]);
+
+ table.add_row(vec!["Summary", ""]);
+ table.add_row(vec![
+ "Messages (sent & received)".to_string(),
+ total_messages.human_count_bare().to_string(),
+ ]);
+ table.add_row(vec![
+ "Batches".to_string(),
+ format!(
+ "{} x {}",
+ total_batches.human_count_bare(),
+ messages_per_batch
+ ),
+ ]);
+ table.add_row(vec![
+ "Total Size".to_string(),
+ IggyByteSize::from(metrics.summary.total_user_data_bytes).to_string(),
+ ]);
+ table.add_row(vec![
+ "Duration".to_string(),
+ format!("{:.2} s", metrics.summary.total_time_secs),
+ ]);
+
+ table.add_row(vec!["Throughput", ""]);
+ table.add_row(vec![
+ "MB/s".to_string(),
+ format!("{:.2}", metrics.summary.throughput_megabytes_per_second),
+ ]);
+
+ table.add_row(vec!["Latency", ""]);
+ table.add_row(vec![
+ "p50".to_string(),
+ format!("{:.2} ms", metrics.summary.p50_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p90".to_string(),
+ format!("{:.2} ms", metrics.summary.p90_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p95".to_string(),
+ format!("{:.2} ms", metrics.summary.p95_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p99".to_string(),
+ format!("{:.2} ms", metrics.summary.p99_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p999".to_string(),
+ format!("{:.2} ms", metrics.summary.p999_latency_ms),
+ ]);
+ table.add_row(vec![
+ "p9999".to_string(),
+ format!("{:.2} ms", metrics.summary.p9999_latency_ms),
+ ]);
+ table.add_row(vec![
+ "avg".to_string(),
+ format!("{:.2} ms", metrics.summary.avg_latency_ms),
+ ]);
+ table.add_row(vec![
+ "median".to_string(),
+ format!("{:.2} ms", metrics.summary.median_latency_ms),
+ ]);
+ table.add_row(vec![
+ "min".to_string(),
+ format!("{:.2} ms", metrics.summary.min_latency_ms),
+ ]);
+ table.add_row(vec![
+ "max".to_string(),
+ format!("{:.2} ms", metrics.summary.max_latency_ms),
+ ]);
+ table.add_row(vec![
+ "std dev".to_string(),
+ format!("{:.2} ms", metrics.summary.std_dev_latency_ms),
+ ]);
+
+ println!("\n{table}");
}
}
diff --git a/core/bench/src/actors/producing_consumer/typed_benchmark_producing_consumer.rs b/core/bench/src/actors/producing_consumer/typed_benchmark_producing_consumer.rs
index 16e12a59d..d65d26ff9 100644
--- a/core/bench/src/actors/producing_consumer/typed_benchmark_producing_consumer.rs
+++ b/core/bench/src/actors/producing_consumer/typed_benchmark_producing_consumer.rs
@@ -64,6 +64,7 @@ impl TypedBenchmarkProducingConsumer {
limit_bytes_per_second: Option<IggyByteSize>,
polling_kind: PollingKind,
origin_timestamp_latency_calculation: bool,
+ pretty: bool,
) -> Self {
let producer_config = BenchmarkProducerConfig {
producer_id: actor_id,
@@ -72,6 +73,7 @@ impl TypedBenchmarkProducingConsumer {
messages_per_batch,
message_size,
warmup_time,
+ pretty,
};
let consumer_config = BenchmarkConsumerConfig {
@@ -82,6 +84,7 @@ impl TypedBenchmarkProducingConsumer {
warmup_time,
polling_kind,
origin_timestamp_latency_calculation,
+ pretty,
};
if use_high_level_api {
diff --git a/core/bench/src/args/common.rs b/core/bench/src/args/common.rs
index 7042f541f..f575c8bec 100644
--- a/core/bench/src/args/common.rs
+++ b/core/bench/src/args/common.rs
@@ -92,6 +92,10 @@ pub struct IggyBenchArgs {
#[arg(long, short = 'p', default_value_t = DEFAULT_ROOT_PASSWORD.to_string())]
pub password: String,
+ /// Pretty table output for benchmark results
+ #[arg(long, default_value_t = false)]
+ pub pretty: bool,
+
/// Reuse existing bench streams instead of deleting and recreating them.
/// Without this flag, existing bench streams are deleted to ensure
/// consumers start with fresh data and accurate latency measurements.
diff --git a/core/bench/src/benchmarks/common.rs
b/core/bench/src/benchmarks/common.rs
index 48025188a..abf43082d 100644
--- a/core/bench/src/benchmarks/common.rs
+++ b/core/bench/src/benchmarks/common.rs
@@ -116,6 +116,7 @@ pub fn build_producer_futures(
let sampling_time = args.sampling_time();
let moving_average_window = args.moving_average_window();
let kind = args.kind();
+ let pretty = args.pretty;
let shared_finish_condition =
BenchmarkFinishCondition::new(args, BenchmarkFinishConditionMode::Shared);
let rate_limit = rate_limit_per_actor(args.rate_limit(), actors);
@@ -149,6 +150,7 @@ pub fn build_producer_futures(
sampling_time,
moving_average_window,
rate_limit,
+ pretty,
);
producer.run().await
}
@@ -168,6 +170,7 @@ pub fn build_consumer_futures(
let sampling_time = args.sampling_time();
let moving_average_window = args.moving_average_window();
let kind = args.kind();
+ let pretty = args.pretty;
let polling_kind = if cg_count > 0 {
PollingKind::Next
} else {
@@ -236,6 +239,7 @@ pub fn build_consumer_futures(
polling_kind,
rate_limit,
origin_timestamp_latency_calculation,
+ pretty,
);
consumer.run().await
}
@@ -298,6 +302,7 @@ pub fn build_producing_consumers_futures(
rate_limit,
polling_kind,
origin_timestamp_latency_calculation,
+ args_clone.pretty,
);
actor.run().await
}
@@ -400,6 +405,7 @@ pub fn build_producing_consumer_groups_futures(
rate_limit,
polling_kind,
origin_timestamp_latency_calculation,
+ args_clone.pretty,
);
actor.run().await
diff --git a/core/bench/src/runner.rs b/core/bench/src/runner.rs
index 6813ec040..46cdf03ed 100644
--- a/core/bench/src/runner.rs
+++ b/core/bench/src/runner.rs
@@ -41,6 +41,7 @@ impl BenchmarkRunner {
#[allow(clippy::cognitive_complexity)]
pub async fn run(mut self) -> Result<(), IggyError> {
let args = self.args.take().unwrap();
+ let pretty = args.pretty;
let should_open_charts = args.open_charts();
let transport = args.transport();
@@ -87,7 +88,7 @@ impl BenchmarkRunner {
// Sleep just to see result prints after all tasks are joined (they print per-actor results)
sleep(Duration::from_millis(10)).await;
- report.print_summary();
+ report.print_summary(pretty);
if let Some(output_dir) = benchmark.args().output_dir() {
// Generate the full output path using the directory name generator