yong.hu...@smartx.com writes:

> From: Hyman Huang <yong.hu...@smartx.com>
>
> The guestperf tool does not currently cover the multifd
> compression option; it is worth supporting so that developers
> can analyze migration performance with different
> compression algorithms.
>
> Multifd currently supports 4 compression algorithms:
> zlib, zstd, qpl, uadk
>
> To request multifd with a specific compression
> algorithm such as zlib:
> $ ./tests/migration/guestperf.py \
>     --multifd --multifd-channels 4 --multifd-compression zlib \
>     --output output.json

Aren't you hitting this bug?

/init (00001): INFO: 1722970659523ms copied 1 GB in 00382ms
/init (00001): INFO: 1722970659896ms copied 1 GB in 00372ms
/init (00001): INFO: 1722970660279ms copied 1 GB in 00382ms
qemu-system-x86_64: multifd_send_pages: channel 0 has already quit!
qemu-system-x86_64: Unable to write to socket: Broken pipe
qemu-system-x86_64: check_section_footer: Read section footer failed: -5
qemu-system-x86_64: load of migration failed: Invalid argument

>
> To run the entire standardized set of multifd compression
> comparisons, with unix migration:
> $ ./tests/migration/guestperf-batch.py \
>     --dst-host localhost --transport unix \
>     --filter compr-multifd-compression* --output outputdir
>
> Signed-off-by: Hyman Huang <yong.hu...@smartx.com>
> ---
>  tests/migration/guestperf/comparison.py | 13 +++++++++++++
>  tests/migration/guestperf/engine.py     | 12 ++++++++++++
>  tests/migration/guestperf/scenario.py   |  7 +++++--
>  tests/migration/guestperf/shell.py      |  3 +++

While here, if you'd like to move the whole guestperf into
scripts/migration and make initrd-stress.img built by default, I'd
appreciate it.

>  4 files changed, 33 insertions(+), 2 deletions(-)
>
> diff --git a/tests/migration/guestperf/comparison.py b/tests/migration/guestperf/comparison.py
> index 40e9d2eb1d..71208e8540 100644
> --- a/tests/migration/guestperf/comparison.py
> +++ b/tests/migration/guestperf/comparison.py
> @@ -158,4 +158,17 @@ def __init__(self, name, scenarios):
>          Scenario("compr-dirty-limit-50MB",
>                   dirty_limit=True, vcpu_dirty_limit=50),
>      ]),
> +
> +    # Looking at effect of multifd with
> +    # different compression algorithm

s/algorithm/algorithms/

> +    Comparison("compr-multifd-compression", scenarios = [
> +        Scenario("compr-multifd-compression-zlib",
> +                 multifd=True, multifd_channels=4, multifd_compression="zlib"),
> +        Scenario("compr-multifd-compression-zstd",
> +                 multifd=True, multifd_channels=4, multifd_compression="zstd"),
> +        Scenario("compr-multifd-compression-qpl",
> +                 multifd=True, multifd_channels=4, multifd_compression="qpl"),
> +        Scenario("compr-multifd-compression-uadk",
> +                 multifd=True, multifd_channels=4, multifd_compression="uadk"),
> +    ]),
>  ]
> diff --git a/tests/migration/guestperf/engine.py b/tests/migration/guestperf/engine.py
> index 608d7270f6..883a7b8ab6 100644
> --- a/tests/migration/guestperf/engine.py
> +++ b/tests/migration/guestperf/engine.py
> @@ -31,6 +31,8 @@
>                               '..', '..', '..', 'python'))
>  from qemu.machine import QEMUMachine
>  
> +# multifd supported compressoin algorithms

compression

> +MULTIFD_CMP_ALGS = ("zlib", "zstd", "qpl", "uadk")
>  
>  class Engine(object):
>  
> @@ -205,6 +207,16 @@ def _migrate(self, hardware, scenario, src, dst, connect_uri):
>              resp = dst.cmd("migrate-set-parameters",
>                             multifd_channels=scenario._multifd_channels)
>  
> +            if scenario._multifd_compression:
> +                if scenario._multifd_compression not in MULTIFD_CMP_ALGS:
> +                    raise Exception("unsupported multifd compression "
> +                                    "algorithm: %s" %
> +                                    scenario._multifd_compression)
> +                resp = src.command("migrate-set-parameters",
> +                    multifd_compression=scenario._multifd_compression)
> +                resp = dst.command("migrate-set-parameters",
> +                    multifd_compression=scenario._multifd_compression)

Should these be src.cmd() and dst.cmd()?
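
Something like the following (untested sketch) would match the
multifd_channels hunk just above, which already uses .cmd():

                resp = src.cmd("migrate-set-parameters",
                    multifd_compression=scenario._multifd_compression)
                resp = dst.cmd("migrate-set-parameters",
                    multifd_compression=scenario._multifd_compression)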

> +
>          if scenario._dirty_limit:
>              if not hardware._dirty_ring_size:
>                  raise Exception("dirty ring size must be configured when "
> diff --git a/tests/migration/guestperf/scenario.py b/tests/migration/guestperf/scenario.py
> index 154c4f5d5f..4be7fafebf 100644
> --- a/tests/migration/guestperf/scenario.py
> +++ b/tests/migration/guestperf/scenario.py
> @@ -30,7 +30,7 @@ def __init__(self, name,
>                   auto_converge=False, auto_converge_step=10,
>                   compression_mt=False, compression_mt_threads=1,
>                   compression_xbzrle=False, compression_xbzrle_cache=10,
> -                 multifd=False, multifd_channels=2,
> +                 multifd=False, multifd_channels=2, multifd_compression="",
>                   dirty_limit=False, x_vcpu_dirty_limit_period=500,
>                   vcpu_dirty_limit=1):
>  
> @@ -61,6 +61,7 @@ def __init__(self, name,
>  
>          self._multifd = multifd
>          self._multifd_channels = multifd_channels
> +        self._multifd_compression = multifd_compression
>  
>          self._dirty_limit = dirty_limit
>          self._x_vcpu_dirty_limit_period = x_vcpu_dirty_limit_period
> @@ -85,6 +86,7 @@ def serialize(self):
>              "compression_xbzrle_cache": self._compression_xbzrle_cache,
>              "multifd": self._multifd,
>              "multifd_channels": self._multifd_channels,
> +            "multifd_compression": self._multifd_compression,
>              "dirty_limit": self._dirty_limit,
>              "x_vcpu_dirty_limit_period": self._x_vcpu_dirty_limit_period,
>              "vcpu_dirty_limit": self._vcpu_dirty_limit,
> @@ -109,4 +111,5 @@ def deserialize(cls, data):
>              data["compression_xbzrle"],
>              data["compression_xbzrle_cache"],
>              data["multifd"],
> -            data["multifd_channels"])
> +            data["multifd_channels"],
> +            data["multifd_compression"])
> diff --git a/tests/migration/guestperf/shell.py b/tests/migration/guestperf/shell.py
> index c85d89efec..1452eb8a33 100644
> --- a/tests/migration/guestperf/shell.py
> +++ b/tests/migration/guestperf/shell.py
> @@ -130,6 +130,8 @@ def __init__(self):
>                              action="store_true")
>          parser.add_argument("--multifd-channels", dest="multifd_channels",
>                              default=2, type=int)
> +        parser.add_argument("--multifd-compression", 
> dest="multifd_compression",
> +                            default="")
>  
>          parser.add_argument("--dirty-limit", dest="dirty_limit", 
> default=False,
>                              action="store_true")
> @@ -166,6 +168,7 @@ def get_scenario(self, args):
>  
>                          multifd=args.multifd,
>                          multifd_channels=args.multifd_channels,
> +                        multifd_compression=args.multifd_compression,
>  
>                          dirty_limit=args.dirty_limit,
>                          x_vcpu_dirty_limit_period=\
