On 2022-09-14 11:29, Kevin Laatz wrote:
Add API unit tests and perf unit tests for the newly added lcore poll
busyness feature.
Signed-off-by: Kevin Laatz <kevin.la...@intel.com>
---
app/test/meson.build | 4 +
app/test/test_lcore_poll_busyness_api.c | 134 +++++++++++++++++++++++
app/test/test_lcore_poll_busyness_perf.c | 72 ++++++++++++
3 files changed, 210 insertions(+)
create mode 100644 app/test/test_lcore_poll_busyness_api.c
create mode 100644 app/test/test_lcore_poll_busyness_perf.c
diff --git a/app/test/meson.build b/app/test/meson.build
index bf1d81f84a..d543e730a2 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -74,6 +74,8 @@ test_sources = files(
'test_ipsec_perf.c',
'test_kni.c',
'test_kvargs.c',
+ 'test_lcore_poll_busyness_api.c',
+ 'test_lcore_poll_busyness_perf.c',
'test_lcores.c',
'test_logs.c',
'test_lpm.c',
@@ -192,6 +194,7 @@ fast_tests = [
['interrupt_autotest', true, true],
['ipfrag_autotest', false, true],
['lcores_autotest', true, true],
+ ['lcore_poll_busyness_autotest', true, true],
['logs_autotest', true, true],
['lpm_autotest', true, true],
['lpm6_autotest', true, true],
@@ -292,6 +295,7 @@ perf_test_names = [
'trace_perf_autotest',
'ipsec_perf_autotest',
'thash_perf_autotest',
+ 'lcore_poll_busyness_perf_autotest',
]
driver_test_names = [
diff --git a/app/test/test_lcore_poll_busyness_api.c
b/app/test/test_lcore_poll_busyness_api.c
new file mode 100644
index 0000000000..db76322994
--- /dev/null
+++ b/app/test/test_lcore_poll_busyness_api.c
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <rte_lcore.h>
+
+#include "test.h"
+
+/* Arbitrary amount of "work" to simulate busyness with */
+#define WORK 32
+#define TIMESTAMP_ITERS 1000000
+
+#define LCORE_POLL_BUSYNESS_NOT_SET -1
+
+/* Verify that the lcore poll busyness feature can be both enabled and
+ * disabled at runtime. Skips when the feature is not compiled in
+ * (rte_lcore_poll_busyness_enabled() returns -ENOTSUP).
+ */
+static int
+test_lcore_poll_busyness_enable_disable(void)
+{
+ int initial_state, curr_state;
+ bool req_state;
+
+ /* Get the initial state */
+ initial_state = rte_lcore_poll_busyness_enabled();
+ if (initial_state == -ENOTSUP)
+ return TEST_SKIPPED;
+
+ /* Set state to the inverse of the initial state and check for the change */
+ req_state = !initial_state;
+ rte_lcore_poll_busyness_enabled_set(req_state);
+ curr_state = rte_lcore_poll_busyness_enabled();
+ if (curr_state != req_state)
+ return TEST_FAILED;
+
+ /* Now change the state back to the original state. By changing it back,
+ * both enable and disable will have been tested.
+ */
+ req_state = !curr_state;
+ rte_lcore_poll_busyness_enabled_set(req_state);
+ curr_state = rte_lcore_poll_busyness_enabled();
+ if (curr_state != req_state)
+ return TEST_FAILED;
+
+ return TEST_SUCCESS;
+}
+
+/* Verify that querying busyness for an out-of-range lcore_id is rejected
+ * with -EINVAL.
+ */
+static int
+test_lcore_poll_busyness_invalid_lcore(void)
+{
+ int ret;
+
+ /* Check if lcore poll busyness is enabled */
+ if (rte_lcore_poll_busyness_enabled() == -ENOTSUP)
+ return TEST_SKIPPED;
+
+ /* Only lcore_id < RTE_MAX_LCORE are valid; RTE_MAX_LCORE itself is
+ * the first out-of-range id and must be rejected.
+ */
+ ret = rte_lcore_poll_busyness(RTE_MAX_LCORE);
+ if (ret != -EINVAL)
+ return TEST_FAILED;
+
+ return TEST_SUCCESS;
+}
+
+/* Verify that an lcore which has never issued a timestamp reports the
+ * "not set" sentinel (-1) rather than a busyness percentage.
+ */
+static int
+test_lcore_poll_busyness_inactive_lcore(void)
+{
+ int ret;
+
+ /* Check if lcore poll busyness is enabled */
+ if (rte_lcore_poll_busyness_enabled() == -ENOTSUP)
+ return TEST_SKIPPED;
+
+ /* Use the test thread lcore_id for this test. Since it is not a polling
+ * application, the busyness is expected to return -1.
+ *
+ * Note: this will not work with affinitized cores
+ */
+ ret = rte_lcore_poll_busyness(rte_lcore_id());
+ if (ret != LCORE_POLL_BUSYNESS_NOT_SET)
+ return TEST_FAILED;
+
+ return TEST_SUCCESS;
+}
+
+/* Emit 'iters' timestamp events, each reporting WORK units of "work", to
+ * drive the poll busyness accounting for the current lcore.
+ */
+static void
+simulate_lcore_poll_busyness(int iters)
+{
+ int i;
+
+ for (i = 0; i < iters; i++)
+ RTE_LCORE_POLL_BUSYNESS_TIMESTAMP(WORK);
+}
+
+/* The test cannot know of an application running to test for valid lcore poll
+ * busyness data. For this test, we simulate lcore poll busyness for the
+ * lcore_id of the test thread for testing purposes.
+ */
+static int
+test_lcore_poll_busyness_active_lcore(void)
+{
+ int ret;
+
+ /* Check if lcore poll busyness is enabled */
+ if (rte_lcore_poll_busyness_enabled() == -ENOTSUP)
+ return TEST_SKIPPED;
+
+ simulate_lcore_poll_busyness(TIMESTAMP_ITERS);
+
+ /* After timestamping with "work" many times, lcore poll busyness should be > 0 */
+ ret = rte_lcore_poll_busyness(rte_lcore_id());
+ if (ret <= 0)
+ return TEST_FAILED;
+
+ return TEST_SUCCESS;
+}
+
+/* Suite definition: API-level checks for the lcore poll busyness feature. */
+static struct unit_test_suite lcore_poll_busyness_tests = {
+ .suite_name = "lcore poll busyness autotest",
+ .setup = NULL,
+ .teardown = NULL,
+ .unit_test_cases = {
+ TEST_CASE(test_lcore_poll_busyness_enable_disable),
+ TEST_CASE(test_lcore_poll_busyness_invalid_lcore),
+ TEST_CASE(test_lcore_poll_busyness_inactive_lcore),
+ TEST_CASE(test_lcore_poll_busyness_active_lcore),
+ TEST_CASES_END()
+ }
+};
+
+/* Entry point invoked by the test framework for lcore_poll_busyness_autotest. */
+static int
+test_lcore_poll_busyness_api(void)
+{
+ return unit_test_suite_runner(&lcore_poll_busyness_tests);
+}
+
+REGISTER_TEST_COMMAND(lcore_poll_busyness_autotest, test_lcore_poll_busyness_api);
diff --git a/app/test/test_lcore_poll_busyness_perf.c
b/app/test/test_lcore_poll_busyness_perf.c
new file mode 100644
index 0000000000..5c27d21b00
--- /dev/null
+++ b/app/test/test_lcore_poll_busyness_perf.c
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <rte_lcore.h>
+#include <rte_cycles.h>
+
+#include "test.h"
+
+/* Arbitrary amount of "work" to simulate busyness with */
+#define WORK 32
+#define TIMESTAMP_ITERS 1000000
+#define TEST_ITERS 10000
+
+/* Emit 'iters' timestamp events, each reporting WORK units of "work", to
+ * prime the poll busyness accounting for the current lcore.
+ */
+static void
+simulate_lcore_poll_busyness(int iters)
+{
+ int i;
+
+ for (i = 0; i < iters; i++)
+ RTE_LCORE_POLL_BUSYNESS_TIMESTAMP(WORK);
+}
+
+/* Measure the per-call cost (in TSC cycles) of the timestamp macro and
+ * print min/avg/max over TEST_ITERS invocations.
+ */
+static void
+test_timestamp_perf(void)
+{
+ uint64_t start, end, diff;
+ uint64_t min = UINT64_MAX;
+ uint64_t max = 0;
+ uint64_t total = 0;
+ int i;
+
+ for (i = 0; i < TEST_ITERS; i++) {
+ start = rte_rdtsc();
+ RTE_LCORE_POLL_BUSYNESS_TIMESTAMP(WORK);
This is how it will look for a thread which is always busy. That's a
relevant case, but not the only such.
Do one with WORK replaced by (i & 1), and you get the other extreme.
Applications that poll multiple sources of work before performing any
actual processing, and which are following your advice about the use of
the macro, will see these kind of latencies.
+ end = rte_rdtsc();
+
+ diff = end - start;
+ min = RTE_MIN(diff, min);
+ max = RTE_MAX(diff, max);
+ total += diff;
+ }
+
+ printf("### Timestamp perf ###\n");
+ printf("Min cycles: %"PRIu64"\n", min);
+ printf("Avg cycles: %"PRIu64"\n", total / TEST_ITERS);
+ printf("Max cycles: %"PRIu64"\n", max);
+ printf("\n");
+}
+
+
+/* Perf test entry point: prime the busyness state for this lcore, then run
+ * the timestamp-cost measurement.
+ */
+static int
+test_lcore_poll_busyness_perf(void)
+{
+ if (rte_lcore_poll_busyness_enabled() == -ENOTSUP) {
+ printf("Lcore poll busyness may be disabled...\n");
+ return TEST_SKIPPED;
+ }
+
+ /* Initialize and prime the timestamp struct with simulated "work" for this lcore */
+ /* NOTE(review): the magic 10000 here duplicates TEST_ITERS while a
+ * TIMESTAMP_ITERS macro also exists — use one of the named constants
+ * instead; confirm which count was intended.
+ */
+ simulate_lcore_poll_busyness(10000);
+
+ /* Run perf tests */
+ test_timestamp_perf();
+
+ return TEST_SUCCESS;
+}
+
+REGISTER_TEST_COMMAND(lcore_poll_busyness_perf_autotest, test_lcore_poll_busyness_perf);