Hi Chenbo,

On 1/8/21 8:20 AM, Xia, Chenbo wrote:
> Hi Maxime,
> 
>> -----Original Message-----
>> From: Maxime Coquelin <maxime.coque...@redhat.com>
>> Sent: Tuesday, December 22, 2020 9:57 PM
>> To: dev@dpdk.org; Xia, Chenbo <chenbo....@intel.com>; amore...@redhat.com
>> Cc: Maxime Coquelin <maxime.coque...@redhat.com>
>> Subject: [PATCH 2/3] vhost: move dirty logging cache out of the virtqueue
>>
>> This patch moves the per-virtqueue dirty logging cache
>> out of the virtqueue struct by allocating it dynamically,
>> and only when live-migration is enabled.
>>
>> It saves 8 cache lines in the vhost_virtqueue struct.
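
(For context on the "8 cache lines" figure, assuming VHOST_LOG_CACHE_NR is
still 32 and sizeof(struct log_cache_entry) is 16 bytes on 64-bit:

    32 entries * 16 bytes = 512 bytes = 8 * 64-byte cache lines

which is what dropping the embedded array from vhost_virtqueue saves.)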
>>
>> Signed-off-by: Maxime Coquelin <maxime.coque...@redhat.com>
>> ---
>>  lib/librte_vhost/vhost.c      | 12 ++++++++++++
>>  lib/librte_vhost/vhost.h      |  2 +-
>>  lib/librte_vhost/vhost_user.c | 25 +++++++++++++++++++++++++
>>  3 files changed, 38 insertions(+), 1 deletion(-)
>>
>> diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
>> index 4e5df862aa..ec6459b2d1 100644
>> --- a/lib/librte_vhost/vhost.c
>> +++ b/lib/librte_vhost/vhost.c
>> @@ -144,6 +144,10 @@ __vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
>>      if (unlikely(!dev->log_base))
>>              return;
>>
>> +    /* No cache, nothing to sync */
>> +    if (unlikely(!vq->log_cache))
>> +            return;
>> +
>>      rte_smp_wmb();
>>
>>      log_base = (unsigned long *)(uintptr_t)dev->log_base;
>> @@ -176,6 +180,14 @@ vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
>>      uint32_t offset = page / (sizeof(unsigned long) << 3);
>>      int i;
>>
>> +    if (unlikely(!vq->log_cache)) {
>> +            /* No logging cache allocated, write dirty log map directly */
>> +            rte_smp_wmb();
>> +            vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
>> +
>> +            return;
>> +    }
>> +
>>      for (i = 0; i < vq->log_cache_nb_elem; i++) {
>>              struct log_cache_entry *elem = vq->log_cache + i;
>>
>> diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
>> index d132e4ae54..e2f14034b4 100644
>> --- a/lib/librte_vhost/vhost.h
>> +++ b/lib/librte_vhost/vhost.h
>> @@ -183,7 +183,7 @@ struct vhost_virtqueue {
>>      bool                    used_wrap_counter;
>>      bool                    avail_wrap_counter;
>>
>> -    struct log_cache_entry log_cache[VHOST_LOG_CACHE_NR];
>> +    struct log_cache_entry *log_cache;
>>      uint16_t log_cache_nb_elem;
>>
>>      rte_rwlock_t    iotlb_lock;
>> diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
>> index 45c8ac09da..7ac3963a07 100644
>> --- a/lib/librte_vhost/vhost_user.c
>> +++ b/lib/librte_vhost/vhost_user.c
>> @@ -1978,6 +1978,11 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
>>      rte_free(vq->batch_copy_elems);
>>      vq->batch_copy_elems = NULL;
>>
>> +    if (vq->log_cache) {
>> +            rte_free(vq->log_cache);
>> +            vq->log_cache = NULL;
>> +    }
>> +
> 
> I think we'd better check and free the log cache in free_vq() too?
> It is possible that the vhost device gets destroyed during migration, right?

Correct, I will free it in free_vq() too.
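
Something along these lines (just a sketch, the exact placement among the
other rte_free() calls in free_vq() is to be confirmed):

    /* free_vq(): release the dirty logging cache along with the other
     * per-virtqueue buffers; rte_free() accepts NULL, so no check is needed.
     */
    rte_free(vq->log_cache);
    vq->log_cache = NULL;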

Thanks!
Maxime

> Thanks,
> Chenbo
> 
>>      msg->size = sizeof(msg->payload.state);
>>      msg->fd_num = 0;
>>
>> @@ -2077,6 +2082,7 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
>>      int fd = msg->fds[0];
>>      uint64_t size, off;
>>      void *addr;
>> +    uint32_t i;
>>
>>      if (validate_msg_fds(msg, 1) != 0)
>>              return RTE_VHOST_MSG_RESULT_ERR;
>> @@ -2130,6 +2136,25 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
>>      dev->log_base = dev->log_addr + off;
>>      dev->log_size = size;
>>
>> +    for (i = 0; i < dev->nr_vring; i++) {
>> +            struct vhost_virtqueue *vq = dev->virtqueue[i];
>> +
>> +            if (vq->log_cache) {
>> +                    rte_free(vq->log_cache);
>> +                    vq->log_cache = NULL;
>> +            }
>> +            vq->log_cache_nb_elem = 0;
>> +            vq->log_cache = rte_zmalloc("vq log cache",
>> +                            sizeof(struct log_cache_entry) * VHOST_LOG_CACHE_NR,
>> +                            0);
>> +            /*
>> +             * If log cache alloc fail, don't fail migration, but no
>> +             * caching will be done, which will impact performance
>> +             */
>> +            if (!vq->log_cache)
>> +                    VHOST_LOG_CONFIG(ERR, "Failed to allocate VQ logging cache\n");
>> +    }
>> +
>>      /*
>>       * The spec is not clear about it (yet), but QEMU doesn't expect
>>       * any payload in the reply.
>> --
>> 2.29.2
> 
