On Fri, Oct 18, 2024 at 3:33 AM Peter Xu <pet...@redhat.com> wrote:

> On Thu, Oct 17, 2024 at 02:42:54PM +0800, yong.hu...@smartx.com wrote:
> > +void cpu_throttle_dirty_sync_timer_tick(void *opaque)
> > +{
> > +    static uint64_t prev_sync_cnt;
>
> We may need to reset this in case migration gets cancelled and invoked
> again, to make sure it keeps working in the 2nd run.
>
> > +    uint64_t sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
> > +
> > +    /*
> > +     * The first iteration copies all memory anyhow and has no
> > +     * effect on guest performance, therefore omit it to avoid
> > +     * paying extra for the sync penalty.
> > +     */
> > +    if (sync_cnt <= 1) {
> > +        goto end;
> > +    }
> > +
> > +    if (sync_cnt == prev_sync_cnt) {
> > +        trace_cpu_throttle_dirty_sync();
> > +        WITH_RCU_READ_LOCK_GUARD() {
> > +            migration_bitmap_sync_precopy(false);
> > +        }
> > +    }
> > +
> > +end:
> > +    prev_sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
> > +
> > +    timer_mod(throttle_dirty_sync_timer,
> > +        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
> > +            CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
> > +}
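(For context, for anyone reading the archive later: the callback above
re-arms itself with timer_mod(); the timer itself is presumably created
once in cpu_throttle_init(), along these lines -- a sketch mirroring how
the existing throttle_timer is set up, not an exact hunk from the patch:

    /* Sketch; assumes creation parallels the existing throttle_timer. */
    throttle_dirty_sync_timer =
        timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                     cpu_throttle_dirty_sync_timer_tick, NULL);

QEMU_CLOCK_VIRTUAL_RT matches the clock used by timer_mod() above.)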
>
> Could both of you have a look and let me know whether you agree that I
> squash the below into this patch when merging:
>

Thanks for the fixup; it looks good to me.
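
For anyone following along, the subtlety is that a function-local static
survives across migration attempts, so after a cancel + restart the first
tick of the new run compares against the previous run's leftover value.
A tiny standalone illustration (toy counter values, not from a real trace):

    #include <inttypes.h>
    #include <stdio.h>

    /* Toy model of the pre-fixup code: prev outlives a "migration run". */
    static void tick(uint64_t sync_cnt)
    {
        static uint64_t prev;

        printf("sync_cnt=%" PRIu64 " prev=%" PRIu64 " stalled=%d\n",
               sync_cnt, prev, sync_cnt == prev);
        prev = sync_cnt;
    }

    int main(void)
    {
        tick(2);
        tick(2);  /* run 1: correctly reports a stall */
        /* migration cancelled and restarted; counter restarts, prev does not */
        tick(2);  /* run 2, first tick: stale prev gives a false "stall" */
        return 0;
    }

Moving the cache to a file-scope variable that is reset on enable, as the
fixup does, avoids exactly this.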


>
> ===8<===
> From 84a2544eab73e35dbd35fed3b1440169915f9aa4 Mon Sep 17 00:00:00 2001
> From: Peter Xu <pet...@redhat.com>
> Date: Thu, 17 Oct 2024 15:27:19 -0400
> Subject: [PATCH] fixup! migration: Support periodic RAMBlock dirty bitmap sync
>
> Signed-off-by: Peter Xu <pet...@redhat.com>
> ---
>  migration/cpu-throttle.c | 11 ++++++++---
>  1 file changed, 8 insertions(+), 3 deletions(-)
>
> diff --git a/migration/cpu-throttle.c b/migration/cpu-throttle.c
> index 342681cdd4..3df287d8d3 100644
> --- a/migration/cpu-throttle.c
> +++ b/migration/cpu-throttle.c
> @@ -36,6 +36,7 @@
>  static QEMUTimer *throttle_timer, *throttle_dirty_sync_timer;
>  static unsigned int throttle_percentage;
>  static bool throttle_dirty_sync_timer_active;
> +static uint64_t throttle_dirty_sync_count_prev;
>
>  #define CPU_THROTTLE_PCT_MIN 1
>  #define CPU_THROTTLE_PCT_MAX 99
> @@ -133,7 +134,6 @@ int cpu_throttle_get_percentage(void)
>
>  void cpu_throttle_dirty_sync_timer_tick(void *opaque)
>  {
> -    static uint64_t prev_sync_cnt;
>      uint64_t sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
>
>      /*
> @@ -145,7 +145,7 @@ void cpu_throttle_dirty_sync_timer_tick(void *opaque)
>          goto end;
>      }
>
> -    if (sync_cnt == prev_sync_cnt) {
> +    if (sync_cnt == throttle_dirty_sync_count_prev) {
>          trace_cpu_throttle_dirty_sync();
>          WITH_RCU_READ_LOCK_GUARD() {
>              migration_bitmap_sync_precopy(false);
> @@ -153,7 +153,7 @@ void cpu_throttle_dirty_sync_timer_tick(void *opaque)
>      }
>
>  end:
> -    prev_sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
> +    throttle_dirty_sync_count_prev = stat64_get(&mig_stats.dirty_sync_count);
>
>      timer_mod(throttle_dirty_sync_timer,
>          qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
> @@ -171,6 +171,11 @@ void cpu_throttle_dirty_sync_timer(bool enable)
>
>      if (enable) {
>          if (!cpu_throttle_dirty_sync_active()) {
> +            /*
> +             * Always reset the dirty sync count cache, in case migration
> +             * was cancelled once.
> +             */
> +            throttle_dirty_sync_count_prev = 0;
>              timer_mod(throttle_dirty_sync_timer,
>                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
>                      CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
> --
> 2.45.0
>
> --
> Peter Xu
>
>
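For completeness: the enable/disable entry point above is driven from the
main patch (call sites not quoted here); roughly, something like

    cpu_throttle_dirty_sync_timer(true);   /* when auto-converge starts throttling */
    cpu_throttle_dirty_sync_timer(false);  /* when migration completes or is cancelled */

which is also where the new reset of throttle_dirty_sync_count_prev takes
effect on re-enable.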
Acked-by: Hyman Huang <yong.hu...@smartx.com>

-- 
Best regards
