Add scatter-gather (SG) copy support to the dma-perf test application.

Signed-off-by: Gowrishankar Muthukrishnan <gmuthukri...@marvell.com>
Acked-by: Anoob Joseph <ano...@marvell.com>
---
 app/test-dma-perf/benchmark.c | 274 +++++++++++++++++++++++++++++-----
 app/test-dma-perf/config.ini  |  19 ++-
 app/test-dma-perf/main.c      |  34 ++++-
 app/test-dma-perf/main.h      |   5 +-
 4 files changed, 292 insertions(+), 40 deletions(-)

diff --git a/app/test-dma-perf/benchmark.c b/app/test-dma-perf/benchmark.c
index 034461da4e..4530bd98ce 100644
--- a/app/test-dma-perf/benchmark.c
+++ b/app/test-dma-perf/benchmark.c
@@ -46,6 +46,10 @@ struct lcore_params {
        uint16_t test_secs;
        struct rte_mbuf **srcs;
        struct rte_mbuf **dsts;
+       struct rte_dma_sge *src_sges;
+       struct rte_dma_sge *dst_sges;
+       uint8_t src_ptrs;
+       uint8_t dst_ptrs;
        volatile struct worker_info worker_info;
 };
 
@@ -86,21 +90,31 @@ calc_result(uint32_t buf_size, uint32_t nr_buf, uint16_t 
nb_workers, uint16_t te
 }
 
 static void
-output_result(uint8_t scenario_id, uint32_t lcore_id, char *dma_name, uint16_t 
ring_size,
-                       uint16_t kick_batch, uint64_t ave_cycle, uint32_t 
buf_size, uint32_t nr_buf,
-                       float memory, float bandwidth, float mops, bool is_dma)
+output_result(struct test_configure *cfg, struct lcore_params *para,
+                       uint16_t kick_batch, uint64_t ave_cycle, uint32_t 
buf_size,
+                       uint32_t nr_buf, float memory, float bandwidth, float 
mops)
 {
-       if (is_dma)
-               printf("lcore %u, DMA %s, DMA Ring Size: %u, Kick Batch Size: 
%u.\n",
-                               lcore_id, dma_name, ring_size, kick_batch);
-       else
+       uint16_t ring_size = cfg->ring_size.cur;
+       uint8_t scenario_id = cfg->scenario_id;
+       uint32_t lcore_id = para->lcore_id;
+       char *dma_name = para->dma_name;
+
+       if (cfg->is_dma) {
+               printf("lcore %u, DMA %s, DMA Ring Size: %u, Kick Batch Size: 
%u", lcore_id,
+                      dma_name, ring_size, kick_batch);
+               if (cfg->is_sg)
+                       printf(" DMA src ptrs: %u, dst ptrs: %u",
+                              para->src_ptrs, para->dst_ptrs);
+               printf(".\n");
+       } else {
                printf("lcore %u\n", lcore_id);
+       }
 
        printf("Average Cycles/op: %" PRIu64 ", Buffer Size: %u B, Buffer 
Number: %u, Memory: %.2lf MB, Frequency: %.3lf Ghz.\n",
                        ave_cycle, buf_size, nr_buf, memory, 
rte_get_timer_hz()/1000000000.0);
        printf("Average Bandwidth: %.3lf Gbps, MOps: %.3lf\n", bandwidth, mops);
 
-       if (is_dma)
+       if (cfg->is_dma)
                snprintf(output_str[lcore_id], MAX_OUTPUT_STR_LEN, 
CSV_LINE_DMA_FMT,
                        scenario_id, lcore_id, dma_name, ring_size, kick_batch, 
buf_size,
                        nr_buf, memory, ave_cycle, bandwidth, mops);
@@ -167,7 +181,7 @@ vchan_data_populate(uint32_t dev_id, struct 
rte_dma_vchan_conf *qconf,
 
 /* Configuration of device. */
 static void
-configure_dmadev_queue(uint32_t dev_id, struct test_configure *cfg)
+configure_dmadev_queue(uint32_t dev_id, struct test_configure *cfg, uint8_t 
ptrs_max)
 {
        uint16_t vchan = 0;
        struct rte_dma_info info;
@@ -190,6 +204,10 @@ configure_dmadev_queue(uint32_t dev_id, struct 
test_configure *cfg)
                rte_exit(EXIT_FAILURE, "Error, no configured queues reported on 
device id. %u\n",
                                dev_id);
 
+       if (info.max_sges < ptrs_max)
+               rte_exit(EXIT_FAILURE, "Error, DMA ptrs more than supported by 
device id %u.\n",
+                               dev_id);
+
        if (rte_dma_start(dev_id) != 0)
                rte_exit(EXIT_FAILURE, "Error with dma start.\n");
 }
@@ -202,8 +220,12 @@ config_dmadevs(struct test_configure *cfg)
        uint32_t i;
        int dev_id;
        uint16_t nb_dmadevs = 0;
+       uint8_t ptrs_max = 0;
        char *dma_name;
 
+       if (cfg->is_sg)
+               ptrs_max = RTE_MAX(cfg->src_ptrs, cfg->dst_ptrs);
+
        for (i = 0; i < ldm->cnt; i++) {
                dma_name = ldm->dma_names[i];
                dev_id = rte_dma_get_dev_id_by_name(dma_name);
@@ -213,7 +235,7 @@ config_dmadevs(struct test_configure *cfg)
                }
 
                ldm->dma_ids[i] = dev_id;
-               configure_dmadev_queue(dev_id, cfg);
+               configure_dmadev_queue(dev_id, cfg, ptrs_max);
                ++nb_dmadevs;
        }
 
@@ -253,7 +275,7 @@ do_dma_submit_and_poll(uint16_t dev_id, uint64_t *async_cnt,
 }
 
 static inline int
-do_dma_mem_copy(void *p)
+do_dma_plain_mem_copy(void *p)
 {
        struct lcore_params *para = (struct lcore_params *)p;
        volatile struct worker_info *worker_info = &(para->worker_info);
@@ -306,6 +328,65 @@ do_dma_mem_copy(void *p)
        return 0;
 }
 
+static inline int
+do_dma_sg_mem_copy(void *p)
+{
+       struct lcore_params *para = (struct lcore_params *)p;
+       volatile struct worker_info *worker_info = &(para->worker_info);
+       struct rte_dma_sge *src_sges = para->src_sges;
+       struct rte_dma_sge *dst_sges = para->dst_sges;
+       const uint16_t kick_batch = para->kick_batch;
+       const uint8_t src_ptrs = para->src_ptrs;
+       const uint8_t dst_ptrs = para->dst_ptrs;
+       const uint16_t dev_id = para->dev_id;
+       uint32_t nr_buf = para->nr_buf;
+       uint64_t async_cnt = 0;
+       uint32_t poll_cnt = 0;
+       uint16_t nr_cpl;
+       uint32_t i, j;
+       int ret;
+
+       nr_buf /= RTE_MAX(src_ptrs, dst_ptrs);
+       worker_info->stop_flag = false;
+       worker_info->ready_flag = true;
+
+       while (!worker_info->start_flag)
+               ;
+
+       while (1) {
+               j = 0;
+               for (i = 0; i < nr_buf; i++) {
+dma_copy:
+                       ret = rte_dma_copy_sg(dev_id, 0,
+                               &src_sges[i * src_ptrs], &dst_sges[j * 
dst_ptrs],
+                               src_ptrs, dst_ptrs, 0);
+                       if (unlikely(ret < 0)) {
+                               if (ret == -ENOSPC) {
+                                       do_dma_submit_and_poll(dev_id, 
&async_cnt, worker_info);
+                                       goto dma_copy;
+                               } else
+                                       error_exit(dev_id);
+                       }
+                       async_cnt++;
+                       j++;
+
+                       if ((async_cnt % kick_batch) == 0)
+                               do_dma_submit_and_poll(dev_id, &async_cnt, 
worker_info);
+               }
+
+               if (worker_info->stop_flag)
+                       break;
+       }
+
+       rte_dma_submit(dev_id, 0);
+       while ((async_cnt > 0) && (poll_cnt++ < POLL_MAX)) {
+               nr_cpl = rte_dma_completed(dev_id, 0, MAX_DMA_CPL_NB, NULL, 
NULL);
+               async_cnt -= nr_cpl;
+       }
+
+       return 0;
+}
+
 static inline int
 do_cpu_mem_copy(void *p)
 {
@@ -347,8 +428,9 @@ dummy_free_ext_buf(void *addr, void *opaque)
 }
 
 static int
-setup_memory_env(struct test_configure *cfg, struct rte_mbuf ***srcs,
-                       struct rte_mbuf ***dsts)
+setup_memory_env(struct test_configure *cfg,
+                        struct rte_mbuf ***srcs, struct rte_mbuf ***dsts,
+                        struct rte_dma_sge **src_sges, struct rte_dma_sge 
**dst_sges)
 {
        static struct rte_mbuf_ext_shared_info *ext_buf_info;
        unsigned int buf_size = cfg->buf_size.cur;
@@ -443,20 +525,56 @@ setup_memory_env(struct test_configure *cfg, struct 
rte_mbuf ***srcs,
                }
        }
 
+       if (cfg->is_sg) {
+               uint8_t src_ptrs = cfg->src_ptrs;
+               uint8_t dst_ptrs = cfg->dst_ptrs;
+               uint32_t sglen_src, sglen_dst;
+
+               *src_sges = rte_zmalloc(NULL, nr_buf * sizeof(struct 
rte_dma_sge),
+                                       RTE_CACHE_LINE_SIZE);
+               if (*src_sges == NULL) {
+                       printf("Error: src_sges array malloc failed.\n");
+                       return -1;
+               }
+
+               *dst_sges = rte_zmalloc(NULL, nr_buf * sizeof(struct 
rte_dma_sge),
+                                       RTE_CACHE_LINE_SIZE);
+               if (*dst_sges == NULL) {
+                       printf("Error: dst_sges array malloc failed.\n");
+                       return -1;
+               }
+
+               sglen_src = buf_size / src_ptrs;
+               sglen_dst = buf_size / dst_ptrs;
+
+               for (i = 0; i < nr_buf; i++) {
+                       (*src_sges)[i].addr = rte_pktmbuf_iova((*srcs)[i]);
+                       (*src_sges)[i].length = sglen_src;
+                       if (!((i+1) % src_ptrs))
+                               (*src_sges)[i].length += (buf_size % src_ptrs);
+
+                       (*dst_sges)[i].addr = rte_pktmbuf_iova((*dsts)[i]);
+                       (*dst_sges)[i].length = sglen_dst;
+                       if (!((i+1) % dst_ptrs))
+                               (*dst_sges)[i].length += (buf_size % dst_ptrs);
+               }
+       }
+
        return 0;
 }
 
 int
-mem_copy_benchmark(struct test_configure *cfg, bool is_dma)
+mem_copy_benchmark(struct test_configure *cfg)
 {
-       uint32_t i;
+       uint32_t i, j;
        uint32_t offset;
        unsigned int lcore_id = 0;
        struct rte_mbuf **srcs = NULL, **dsts = NULL, **m = NULL;
+       struct rte_dma_sge *src_sges = NULL, *dst_sges = NULL;
        struct lcore_dma_map_t *ldm = &cfg->lcore_dma_map;
+       const uint32_t mcore_id = rte_get_main_lcore();
        unsigned int buf_size = cfg->buf_size.cur;
        uint16_t kick_batch = cfg->kick_batch.cur;
-       uint32_t nr_buf = cfg->nr_buf = (cfg->mem_size.cur * 1024 * 1024) / 
(cfg->buf_size.cur * 2);
        uint16_t nb_workers = ldm->cnt;
        uint16_t test_secs = cfg->test_secs;
        float memory = 0;
@@ -464,12 +582,32 @@ mem_copy_benchmark(struct test_configure *cfg, bool 
is_dma)
        uint32_t avg_cycles_total;
        float mops, mops_total;
        float bandwidth, bandwidth_total;
+       uint32_t nr_sgsrc = 0, nr_sgdst = 0;
+       uint32_t nr_buf;
        int ret = 0;
 
-       if (setup_memory_env(cfg, &srcs, &dsts) < 0)
+       /* Align number of buffers according to workers count */
+       nr_buf = (cfg->mem_size.cur * 1024 * 1024) / (cfg->buf_size.cur * 2);
+       nr_buf -= (nr_buf % nb_workers);
+       if (cfg->is_sg) {
+               nr_buf /= nb_workers;
+               nr_buf -= nr_buf % (cfg->src_ptrs * cfg->dst_ptrs);
+               nr_buf *= nb_workers;
+
+               if (cfg->dst_ptrs > cfg->src_ptrs) {
+                       nr_sgsrc = (nr_buf / cfg->dst_ptrs * cfg->src_ptrs);
+                       nr_sgdst = nr_buf;
+               } else {
+                       nr_sgsrc = nr_buf;
+                       nr_sgdst = (nr_buf / cfg->src_ptrs * cfg->dst_ptrs);
+               }
+       }
+
+       cfg->nr_buf = nr_buf;
+       if (setup_memory_env(cfg, &srcs, &dsts, &src_sges, &dst_sges) < 0)
                goto out;
 
-       if (is_dma)
+       if (cfg->is_dma)
                if (config_dmadevs(cfg) < 0)
                        goto out;
 
@@ -483,13 +621,23 @@ mem_copy_benchmark(struct test_configure *cfg, bool 
is_dma)
 
        for (i = 0; i < nb_workers; i++) {
                lcore_id = ldm->lcores[i];
+               if (lcore_id == mcore_id) {
+                       printf("lcore parameters can not use main core id 
%d\n", mcore_id);
+                       goto out;
+               }
+
+               if (rte_eal_lcore_role(lcore_id) == ROLE_OFF) {
+                       printf("lcore parameters can not use offline core id 
%d\n", lcore_id);
+                       goto out;
+               }
+
                offset = nr_buf / nb_workers * i;
                lcores[i] = rte_malloc(NULL, sizeof(struct lcore_params), 0);
                if (lcores[i] == NULL) {
                        printf("lcore parameters malloc failure for lcore 
%d\n", lcore_id);
                        break;
                }
-               if (is_dma) {
+               if (cfg->is_dma) {
                        lcores[i]->dma_name = ldm->dma_names[i];
                        lcores[i]->dev_id = ldm->dma_ids[i];
                        lcores[i]->kick_batch = kick_batch;
@@ -503,10 +651,23 @@ mem_copy_benchmark(struct test_configure *cfg, bool 
is_dma)
                lcores[i]->scenario_id = cfg->scenario_id;
                lcores[i]->lcore_id = lcore_id;
 
-               if (is_dma)
-                       rte_eal_remote_launch(do_dma_mem_copy, (void 
*)(lcores[i]), lcore_id);
-               else
+               if (cfg->is_sg) {
+                       lcores[i]->src_ptrs = cfg->src_ptrs;
+                       lcores[i]->dst_ptrs = cfg->dst_ptrs;
+                       lcores[i]->src_sges = src_sges + (nr_sgsrc / nb_workers 
* i);
+                       lcores[i]->dst_sges = dst_sges + (nr_sgdst / nb_workers 
* i);
+               }
+
+               if (cfg->is_dma) {
+                       if (!cfg->is_sg)
+                               rte_eal_remote_launch(do_dma_plain_mem_copy, 
(void *)(lcores[i]),
+                                       lcore_id);
+                       else
+                               rte_eal_remote_launch(do_dma_sg_mem_copy, (void 
*)(lcores[i]),
+                                       lcore_id);
+               } else {
                        rte_eal_remote_launch(do_cpu_mem_copy, (void 
*)(lcores[i]), lcore_id);
+               }
        }
 
        while (1) {
@@ -538,13 +699,53 @@ mem_copy_benchmark(struct test_configure *cfg, bool 
is_dma)
 
        rte_eal_mp_wait_lcore();
 
-       for (i = 0; i < (nr_buf / nb_workers) * nb_workers; i++) {
-               if (memcmp(rte_pktmbuf_mtod(srcs[i], void *),
-                          rte_pktmbuf_mtod(dsts[i], void *),
-                          cfg->buf_size.cur) != 0) {
-                       printf("Copy validation fails for buffer number %d\n", 
i);
-                       ret = -1;
-                       goto out;
+       if (!cfg->is_sg) {
+               for (i = 0; i < (nr_buf / nb_workers) * nb_workers; i++) {
+                       if (memcmp(rte_pktmbuf_mtod(srcs[i], void *),
+                                       rte_pktmbuf_mtod(dsts[i], void *),
+                                       cfg->buf_size.cur) != 0) {
+                               printf("Copy validation fails for buffer number 
%d\n", i);
+                               ret = -1;
+                               goto out;
+                       }
+               }
+       } else {
+               size_t src_remsz = buf_size % cfg->src_ptrs;
+               size_t dst_remsz = buf_size % cfg->dst_ptrs;
+               size_t src_sz = buf_size / cfg->src_ptrs;
+               size_t dst_sz = buf_size / cfg->dst_ptrs;
+               uint8_t src[buf_size], dst[buf_size];
+               uint8_t *sbuf, *dbuf, *ptr;
+
+               for (i = 0; i < (nr_buf / RTE_MAX(cfg->src_ptrs, 
cfg->dst_ptrs)); i++) {
+                       sbuf = src;
+                       dbuf = dst;
+                       ptr = NULL;
+
+                       for (j = 0; j < cfg->src_ptrs; j++) {
+                               ptr = rte_pktmbuf_mtod(srcs[i * cfg->src_ptrs + 
j], uint8_t *);
+                               memcpy(sbuf, ptr, src_sz);
+                               sbuf += src_sz;
+                       }
+
+                       if (src_remsz)
+                               memcpy(sbuf, ptr + src_sz, src_remsz);
+
+                       for (j = 0; j < cfg->dst_ptrs; j++) {
+                               ptr = rte_pktmbuf_mtod(dsts[i * cfg->dst_ptrs + 
j], uint8_t *);
+                               memcpy(dbuf, ptr, dst_sz);
+                               dbuf += dst_sz;
+                       }
+
+                       if (dst_remsz)
+                               memcpy(dbuf, ptr + dst_sz, dst_remsz);
+
+                       if (memcmp(src, dst, buf_size) != 0) {
+                               printf("SG Copy validation fails for buffer 
number %d\n",
+                                       i * cfg->src_ptrs);
+                               ret = -1;
+                               goto out;
+                       }
                }
        }
 
@@ -555,10 +756,8 @@ mem_copy_benchmark(struct test_configure *cfg, bool is_dma)
                calc_result(buf_size, nr_buf, nb_workers, test_secs,
                        lcores[i]->worker_info.test_cpl,
                        &memory, &avg_cycles, &bandwidth, &mops);
-               output_result(cfg->scenario_id, lcores[i]->lcore_id,
-                                       lcores[i]->dma_name, 
cfg->ring_size.cur, kick_batch,
-                                       avg_cycles, buf_size, nr_buf / 
nb_workers, memory,
-                                       bandwidth, mops, is_dma);
+               output_result(cfg, lcores[i], kick_batch, avg_cycles, buf_size,
+                       nr_buf / nb_workers, memory, bandwidth, mops);
                mops_total += mops;
                bandwidth_total += bandwidth;
                avg_cycles_total += avg_cycles;
@@ -601,13 +800,20 @@ mem_copy_benchmark(struct test_configure *cfg, bool 
is_dma)
        rte_mempool_free(dst_pool);
        dst_pool = NULL;
 
+       /* free sges for mbufs */
+       rte_free(src_sges);
+       src_sges = NULL;
+
+       rte_free(dst_sges);
+       dst_sges = NULL;
+
        /* free the worker parameters */
        for (i = 0; i < nb_workers; i++) {
                rte_free(lcores[i]);
                lcores[i] = NULL;
        }
 
-       if (is_dma) {
+       if (cfg->is_dma) {
                for (i = 0; i < nb_workers; i++) {
                        printf("Stopping dmadev %d\n", ldm->dma_ids[i]);
                        rte_dma_stop(ldm->dma_ids[i]);
diff --git a/app/test-dma-perf/config.ini b/app/test-dma-perf/config.ini
index cddcf93c6e..f460b93414 100644
--- a/app/test-dma-perf/config.ini
+++ b/app/test-dma-perf/config.ini
@@ -9,6 +9,8 @@
 ; "buf_size" denotes the memory size of a single operation.
 ; "dma_ring_size" denotes the dma ring buffer size. It should be must be a 
power of two, and between
 ;  64 and 4096.
+; "dma_ptrs_src" denotes the number of source segments.
+; "dma_ptrs_dst" denotes the number of destination segments.
 ; "kick_batch" denotes the dma operation batch size, and should be greater 
than 1 normally.
 
 ; The format for variables is variable=first,last,increment,ADD|MUL.
@@ -69,6 +71,21 @@ lcore_dma=lcore10@0000:00:04.2, lcore11@0000:00:04.3
 eal_args=--in-memory --file-prefix=test
 
 [case2]
+type=DMA_MEM_COPY
+mem_size=10
+buf_size=64,8192,2,MUL
+dma_ring_size=1024
+dma_ptrs_src=4
+dma_ptrs_dst=1
+kick_batch=32
+src_numa_node=0
+dst_numa_node=0
+cache_flush=0
+test_seconds=2
+lcore_dma=lcore10@0000:00:04.2, lcore11@0000:00:04.3
+eal_args=--in-memory --file-prefix=test
+
+[case3]
 skip=1
 type=DMA_MEM_COPY
 direction=2
@@ -88,7 +105,7 @@ test_seconds=2
 lcore_dma=lcore10@0000:00:04.2, lcore11@0000:00:04.3
 eal_args=--in-memory --file-prefix=test
 
-[case3]
+[case4]
 type=CPU_MEM_COPY
 mem_size=10
 buf_size=64,8192,2,MUL
diff --git a/app/test-dma-perf/main.c b/app/test-dma-perf/main.c
index 1d54173a9c..e81eca14e1 100644
--- a/app/test-dma-perf/main.c
+++ b/app/test-dma-perf/main.c
@@ -107,10 +107,8 @@ run_test_case(struct test_configure *case_cfg)
 
        switch (case_cfg->test_type) {
        case TEST_TYPE_DMA_MEM_COPY:
-               ret = mem_copy_benchmark(case_cfg, true);
-               break;
        case TEST_TYPE_CPU_MEM_COPY:
-               ret = mem_copy_benchmark(case_cfg, false);
+               ret = mem_copy_benchmark(case_cfg);
                break;
        default:
                printf("Unknown test type. %s\n", case_cfg->test_type_str);
@@ -340,7 +338,8 @@ load_configs(const char *path)
        const char *case_type;
        const char *transfer_dir;
        const char *lcore_dma;
-       const char *mem_size_str, *buf_size_str, *ring_size_str, 
*kick_batch_str;
+       const char *mem_size_str, *buf_size_str, *ring_size_str, 
*kick_batch_str,
+               *src_ptrs_str, *dst_ptrs_str;
        const char *skip;
        const char *raddr, *scoreid, *dcoreid, *vfid, *pfid;
        int args_nr, nb_vp;
@@ -455,6 +454,7 @@ load_configs(const char *path)
                        test_case->dcoreid = (uint8_t)atoi(dcoreid);
                }
 
+               test_case->is_dma = is_dma;
                test_case->src_numa_node = 
(int)atoi(rte_cfgfile_get_entry(cfgfile,
                                                                section_name, 
"src_numa_node"));
                test_case->dst_numa_node = 
(int)atoi(rte_cfgfile_get_entry(cfgfile,
@@ -489,6 +489,32 @@ load_configs(const char *path)
                        } else if (args_nr == 4)
                                nb_vp++;
 
+                       src_ptrs_str = rte_cfgfile_get_entry(cfgfile, 
section_name,
+                                                               "dma_ptrs_src");
+                       if (src_ptrs_str != NULL) {
+                               test_case->src_ptrs = 
(int)atoi(rte_cfgfile_get_entry(cfgfile,
+                                                               section_name, 
"dma_ptrs_src"));
+                       }
+
+                       dst_ptrs_str = rte_cfgfile_get_entry(cfgfile, 
section_name,
+                                                               "dma_ptrs_dst");
+                       if (dst_ptrs_str != NULL) {
+                               test_case->dst_ptrs = 
(int)atoi(rte_cfgfile_get_entry(cfgfile,
+                                                               section_name, 
"dma_ptrs_dst"));
+                       }
+
+                       if ((src_ptrs_str != NULL && dst_ptrs_str == NULL) ||
+                           (src_ptrs_str == NULL && dst_ptrs_str != NULL)) {
+                               printf("parse dma_ptrs_src, dma_ptrs_dst error 
in case %d.\n",
+                                       i + 1);
+                               test_case->is_valid = false;
+                               continue;
+                       } else if (src_ptrs_str != NULL && dst_ptrs_str != 
NULL) {
+                               test_case->is_sg = true;
+                       } else {
+                               test_case->is_sg = false;
+                       }
+
                        kick_batch_str = rte_cfgfile_get_entry(cfgfile, 
section_name, "kick_batch");
                        args_nr = parse_entry(kick_batch_str, 
&test_case->kick_batch);
                        if (args_nr < 0) {
diff --git a/app/test-dma-perf/main.h b/app/test-dma-perf/main.h
index 7dcaa166f2..31e0bf71c9 100644
--- a/app/test-dma-perf/main.h
+++ b/app/test-dma-perf/main.h
@@ -48,11 +48,14 @@ struct test_configure {
        uint16_t dst_numa_node;
        uint16_t opcode;
        bool is_dma;
+       bool is_sg;
        struct lcore_dma_map_t lcore_dma_map;
        struct test_configure_entry mem_size;
        struct test_configure_entry buf_size;
        struct test_configure_entry ring_size;
        struct test_configure_entry kick_batch;
+       uint8_t src_ptrs;
+       uint8_t dst_ptrs;
        uint8_t cache_flush;
        uint32_t nr_buf;
        uint16_t test_secs;
@@ -65,6 +68,6 @@ struct test_configure {
        uintptr_t raddr;
 };
 
-int mem_copy_benchmark(struct test_configure *cfg, bool is_dma);
+int mem_copy_benchmark(struct test_configure *cfg);
 
 #endif /* MAIN_H */
-- 
2.25.1
