Re: [PATCH v2 07/12] virtio: queue_reset: pci: support VIRTIO_F_RING_RESET

2022-01-20 Thread Michael S. Tsirkin
On Thu, Jan 20, 2022 at 02:42:58PM +0800, Xuan Zhuo wrote:
> This patch implements virtio pci support for QUEUE RESET.
> 
> Performing reset on a queue is divided into two steps:
> 
> 1. reset_vq: reset one vq
> 2. enable_reset_vq: re-enable the reset queue
> 
> In the first step, these tasks will be completed:
>1. notify the hardware queue to reset
>2. recycle the buffer from vq
>3. delete the vq
> 
> When a vq is deleted, vp_del_vq() is called to release all of its memory.
> This does not affect the later teardown of the remaining vqs, because that
> walks the device's list of vqs, and the reset vq has already been removed
> from that list.
> 
> Deleting the vq releases both its info structure and the vq itself, so I
> save msix_vec in vp_dev->vqs[queue_index]. When the queue is re-enabled,
> the saved msix_vec can be reused, and intx_enabled determines which method
> is used to enable the queue.
> 
> Signed-off-by: Xuan Zhuo 

There's something I don't understand here. It looks like
you assume that when you reset a queue, you also
reset the mapping from queue to event vector.
The spec does not say it should, and I don't think it's
useful to extend spec to do it - we already have a simple
way to tweak the mapping.

Avoid doing that, and things will be much easier, with no need
to interact with a transport, won't they?


> ---
>  drivers/virtio/virtio_pci_common.c | 49 
>  drivers/virtio/virtio_pci_common.h |  4 ++
>  drivers/virtio/virtio_pci_modern.c | 73 ++
>  3 files changed, 126 insertions(+)
> 
> diff --git a/drivers/virtio/virtio_pci_common.c 
> b/drivers/virtio/virtio_pci_common.c
> index 5afe207ce28a..28b5ffde4621 100644
> --- a/drivers/virtio/virtio_pci_common.c
> +++ b/drivers/virtio/virtio_pci_common.c
> @@ -464,6 +464,55 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned 
> nvqs,
>   return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
>  }
>  
> +#define VQ_IS_DELETED(vp_dev, idx) ((unsigned long)vp_dev->vqs[idx] & 1)
> +#define VQ_RESET_MSIX_VEC(vp_dev, idx) ((unsigned long)vp_dev->vqs[idx] >> 2)
> +#define VQ_RESET_MARK(msix_vec) ((void *)(long)((msix_vec << 2) + 1))
> +
> +void vp_del_reset_vq(struct virtio_device *vdev, u16 queue_index)
> +{
> + struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> + struct virtio_pci_vq_info *info;
> + u16 msix_vec;
> +
> + info = vp_dev->vqs[queue_index];
> +
> + msix_vec = info->msix_vector;
> +
> + /* delete vq */
> + vp_del_vq(info->vq);
> +
> + /* Mark the vq has been deleted, and save the msix_vec. */
> + vp_dev->vqs[queue_index] = VQ_RESET_MARK(msix_vec);
> +}
> +
> +struct virtqueue *vp_enable_reset_vq(struct virtio_device *vdev,
> +  int queue_index,
> +  vq_callback_t *callback,
> +  const char *name,
> +  const bool ctx)
> +{
> + struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> + struct virtqueue *vq;
> + u16 msix_vec;
> +
> + if (!VQ_IS_DELETED(vp_dev, queue_index))
> + return ERR_PTR(-EPERM);
> +
> + msix_vec = VQ_RESET_MSIX_VEC(vp_dev, queue_index);
> +
> + if (vp_dev->intx_enabled)
> + vq = vp_setup_vq(vdev, queue_index, callback, name, ctx,
> +  VIRTIO_MSI_NO_VECTOR);
> + else
> + vq = vp_enable_vq_msix(vdev, queue_index, callback, name, ctx,
> +msix_vec);
> +
> + if (IS_ERR(vq))
> + vp_dev->vqs[queue_index] = VQ_RESET_MARK(msix_vec);
> +
> + return vq;
> +}
> +
>  const char *vp_bus_name(struct virtio_device *vdev)
>  {
>   struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> diff --git a/drivers/virtio/virtio_pci_common.h 
> b/drivers/virtio/virtio_pci_common.h
> index 23f6c5c678d5..96c13b1398f8 100644
> --- a/drivers/virtio/virtio_pci_common.h
> +++ b/drivers/virtio/virtio_pci_common.h
> @@ -115,6 +115,10 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned 
> nvqs,
>   struct virtqueue *vqs[], vq_callback_t *callbacks[],
>   const char * const names[], const bool *ctx,
>   struct irq_affinity *desc);
> +void vp_del_reset_vq(struct virtio_device *vdev, u16 queue_index);
> +struct virtqueue *vp_enable_reset_vq(struct virtio_device *vdev, int 
> queue_index,
> +  vq_callback_t *callback, const char *name,
> +  const bool ctx);
>  const char *vp_bus_name(struct virtio_device *vdev);
>  
>  /* Setup the affinity for a virtqueue:
> diff --git a/drivers/virtio/virtio_pci_modern.c 
> b/drivers/virtio/virtio_pci_modern.c
> index 5455bc041fb6..fbf87239c920 100644
> --- a/drivers/virtio/virtio_pci_modern.c
> +++ b/drivers/virtio/virtio_pci_modern.c
> @@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device 
> *vdev, u64 feat
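For reference, the VQ_RESET_* macros above tag the per-queue slot pointer itself
rather than keeping a separate table. A worked example, derived only from the
macros quoted above (not from the rest of the thread), for a reset queue whose
msix_vec is 3:

    void *slot = VQ_RESET_MARK(3);     /* stores (3 << 2) | 1 == 0xd in vp_dev->vqs[idx] */
    VQ_IS_DELETED(vp_dev, idx);        /* true: the low bit of the slot value is set     */
    VQ_RESET_MSIX_VEC(vp_dev, idx);    /* 0xd >> 2 == 3, recovering the saved vector     */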

Re: [PATCH v2 07/12] virtio: queue_reset: pci: support VIRTIO_F_RING_RESET

2022-01-20 Thread Xuan Zhuo
On Thu, 20 Jan 2022 05:55:14 -0500, Michael S. Tsirkin  wrote:
> On Thu, Jan 20, 2022 at 02:42:58PM +0800, Xuan Zhuo wrote:
> > This patch implements virtio pci support for QUEUE RESET.
> >
> > Performing reset on a queue is divided into two steps:
> >
> > 1. reset_vq: reset one vq
> > 2. enable_reset_vq: re-enable the reset queue
> >
> > In the first step, these tasks will be completed:
> >1. notify the hardware queue to reset
> >2. recycle the buffer from vq
> >3. delete the vq
> >
> > When a vq is deleted, vp_del_vq() is called to release all of its memory.
> > This does not affect the later teardown of the remaining vqs, because that
> > walks the device's list of vqs, and the reset vq has already been removed
> > from that list.
> >
> > Deleting the vq releases both its info structure and the vq itself, so I
> > save msix_vec in vp_dev->vqs[queue_index]. When the queue is re-enabled,
> > the saved msix_vec can be reused, and intx_enabled determines which method
> > is used to enable the queue.
> >
> > Signed-off-by: Xuan Zhuo 
>
> There's something I don't understand here. It looks like
> you assume that when you reset a queue, you also
> reset the mapping from queue to event vector.
> The spec does not say it should, and I don't think it's
> useful to extend spec to do it - we already have a simple
> way to tweak the mapping.
>

Sorry, what is the already existing method you are referring to, I didn't find
it.

I think you mean that we don't have to reset the event vector, I think you are
right.



Thanks.

> Avoid doing that, and things will be much easier, with no need
> to interact with a transport, won't they?
>
>
> > ---
> >  drivers/virtio/virtio_pci_common.c | 49 
> >  drivers/virtio/virtio_pci_common.h |  4 ++
> >  drivers/virtio/virtio_pci_modern.c | 73 ++
> >  3 files changed, 126 insertions(+)
> >
> > diff --git a/drivers/virtio/virtio_pci_common.c 
> > b/drivers/virtio/virtio_pci_common.c
> > index 5afe207ce28a..28b5ffde4621 100644
> > --- a/drivers/virtio/virtio_pci_common.c
> > +++ b/drivers/virtio/virtio_pci_common.c
> > @@ -464,6 +464,55 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned 
> > nvqs,
> > return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
> >  }
> >
> > +#define VQ_IS_DELETED(vp_dev, idx) ((unsigned long)vp_dev->vqs[idx] & 1)
> > +#define VQ_RESET_MSIX_VEC(vp_dev, idx) ((unsigned long)vp_dev->vqs[idx] >> 
> > 2)
> > +#define VQ_RESET_MARK(msix_vec) ((void *)(long)((msix_vec << 2) + 1))
> > +
> > +void vp_del_reset_vq(struct virtio_device *vdev, u16 queue_index)
> > +{
> > +   struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> > +   struct virtio_pci_vq_info *info;
> > +   u16 msix_vec;
> > +
> > +   info = vp_dev->vqs[queue_index];
> > +
> > +   msix_vec = info->msix_vector;
> > +
> > +   /* delete vq */
> > +   vp_del_vq(info->vq);
> > +
> > +   /* Mark the vq has been deleted, and save the msix_vec. */
> > +   vp_dev->vqs[queue_index] = VQ_RESET_MARK(msix_vec);
> > +}
> > +
> > +struct virtqueue *vp_enable_reset_vq(struct virtio_device *vdev,
> > +int queue_index,
> > +vq_callback_t *callback,
> > +const char *name,
> > +const bool ctx)
> > +{
> > +   struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> > +   struct virtqueue *vq;
> > +   u16 msix_vec;
> > +
> > +   if (!VQ_IS_DELETED(vp_dev, queue_index))
> > +   return ERR_PTR(-EPERM);
> > +
> > +   msix_vec = VQ_RESET_MSIX_VEC(vp_dev, queue_index);
> > +
> > +   if (vp_dev->intx_enabled)
> > +   vq = vp_setup_vq(vdev, queue_index, callback, name, ctx,
> > +VIRTIO_MSI_NO_VECTOR);
> > +   else
> > +   vq = vp_enable_vq_msix(vdev, queue_index, callback, name, ctx,
> > +  msix_vec);
> > +
> > +   if (IS_ERR(vq))
> > +   vp_dev->vqs[queue_index] = VQ_RESET_MARK(msix_vec);
> > +
> > +   return vq;
> > +}
> > +
> >  const char *vp_bus_name(struct virtio_device *vdev)
> >  {
> > struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> > diff --git a/drivers/virtio/virtio_pci_common.h 
> > b/drivers/virtio/virtio_pci_common.h
> > index 23f6c5c678d5..96c13b1398f8 100644
> > --- a/drivers/virtio/virtio_pci_common.h
> > +++ b/drivers/virtio/virtio_pci_common.h
> > @@ -115,6 +115,10 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned 
> > nvqs,
> > struct virtqueue *vqs[], vq_callback_t *callbacks[],
> > const char * const names[], const bool *ctx,
> > struct irq_affinity *desc);
> > +void vp_del_reset_vq(struct virtio_device *vdev, u16 queue_index);
> > +struct virtqueue *vp_enable_reset_vq(struct virtio_device *vdev, int 
> > queue_index,
> > +vq_callback_t *callback, const char *name,
> > +const bool ctx);
> >  co

Re: [PATCH v2 2/2] virtio: acknowledge all features before access

2022-01-20 Thread Cornelia Huck
On Tue, Jan 18 2022, "Michael S. Tsirkin"  wrote:

> The feature negotiation was designed in a way that
> makes it possible for devices to know which config
> fields will be accessed by drivers.
>
> This is broken since commit 404123c2db79 ("virtio: allow drivers to
> validate features") with fallout in at least block and net.  We have a
> partial work-around in commit 2f9a174f918e ("virtio: write back
> F_VERSION_1 before validate") which at least lets devices find out which
> format should config space have, but this is a partial fix: guests
> should not access config space without acknowledging features since
> otherwise we'll never be able to change the config space format.
>
> To fix, split finalize_features from virtio_finalize_features and
> call finalize_features with all feature bits before validation,
> and then - if validation changed any bits - once again after.
>
> Since virtio_finalize_features no longer writes out features
> rename it to virtio_features_ok - since that is what it does:
> checks that features are ok with the device.
>
> As a side effect, this also reduces the amount of hypervisor accesses -
> we now only acknowledge features once unless we are clearing any
> features when validating (which is uncommon).
>
> Cc: sta...@vger.kernel.org
> Fixes: 404123c2db79 ("virtio: allow drivers to validate features")
> Fixes: 2f9a174f918e ("virtio: write back F_VERSION_1 before validate")
> Cc: "Halil Pasic" 
> Signed-off-by: Michael S. Tsirkin 
>
> fixup! virtio: acknowledge all features before access

Leftover from rebasing?

> ---
>  drivers/virtio/virtio.c   | 39 ---
>  include/linux/virtio_config.h |  3 ++-
>  2 files changed, 24 insertions(+), 18 deletions(-)

Reviewed-by: Cornelia Huck 

Would like to see a quick sanity test from Halil, though.
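For readers following along, the negotiation order the commit message describes
would look roughly like this in the driver core probe path (a sketch of the
described flow, not the diff itself):

    vdev->features = driver_features & device_features;
    vdev->config->finalize_features(vdev);      /* ack features before any config access */

    if (drv->validate) {
            u64 features = vdev->features;

            err = drv->validate(vdev);
            if (err)
                    goto err;
            /* re-ack only if validate cleared some bits (uncommon) */
            if (features != vdev->features)
                    vdev->config->finalize_features(vdev);
    }

    err = virtio_features_ok(vdev);             /* FEATURES_OK handshake check */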



Re: [PATCH v2 07/12] virtio: queue_reset: pci: support VIRTIO_F_RING_RESET

2022-01-20 Thread Michael S. Tsirkin
On Thu, Jan 20, 2022 at 07:46:20PM +0800, Xuan Zhuo wrote:
> On Thu, 20 Jan 2022 05:55:14 -0500, Michael S. Tsirkin  
> wrote:
> > On Thu, Jan 20, 2022 at 02:42:58PM +0800, Xuan Zhuo wrote:
> > > This patch implements virtio pci support for QUEUE RESET.
> > >
> > > Performing reset on a queue is divided into two steps:
> > >
> > > 1. reset_vq: reset one vq
> > > 2. enable_reset_vq: re-enable the reset queue
> > >
> > > In the first step, these tasks will be completed:
> > >1. notify the hardware queue to reset
> > >2. recycle the buffer from vq
> > >3. delete the vq
> > >
> > > When a vq is deleted, vp_del_vq() is called to release all of its memory.
> > > This does not affect the later teardown of the remaining vqs, because that
> > > walks the device's list of vqs, and the reset vq has already been removed
> > > from that list.
> > >
> > > Deleting the vq releases both its info structure and the vq itself, so I
> > > save msix_vec in vp_dev->vqs[queue_index]. When the queue is re-enabled,
> > > the saved msix_vec can be reused, and intx_enabled determines which method
> > > is used to enable the queue.
> > >
> > > Signed-off-by: Xuan Zhuo 
> >
> > There's something I don't understand here. It looks like
> > you assume that when you reset a queue, you also
> > reset the mapping from queue to event vector.
> > The spec does not say it should, and I don't think it's
> > useful to extend spec to do it - we already have a simple
> > way to tweak the mapping.
> >
> 
> Sorry, what is the already existing method you are referring to, I didn't find
> it.


Write 0xffff into the vector number.
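For the modern PCI transport that amounts to a single register write, e.g.
along these lines (a sketch assuming the vp_modern_* helpers, not code from
this series):

    /* detach the queue from its MSI-X vector without resetting the queue */
    vp_modern_queue_vector(&vp_dev->mdev, vq->index, VIRTIO_MSI_NO_VECTOR);

    /* ...and later point it back at the vector it used before */
    vp_modern_queue_vector(&vp_dev->mdev, vq->index, msix_vec);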

> I think you mean that we don't have to reset the event vector, I think you are
> right.
> 
> 
> 
> Thanks.
> 
> > Avoid doing that, and things will be much easier, with no need
> > to interact with a transport, won't they?
> >
> >
> > > ---
> > >  drivers/virtio/virtio_pci_common.c | 49 
> > >  drivers/virtio/virtio_pci_common.h |  4 ++
> > >  drivers/virtio/virtio_pci_modern.c | 73 ++
> > >  3 files changed, 126 insertions(+)
> > >
> > > diff --git a/drivers/virtio/virtio_pci_common.c 
> > > b/drivers/virtio/virtio_pci_common.c
> > > index 5afe207ce28a..28b5ffde4621 100644
> > > --- a/drivers/virtio/virtio_pci_common.c
> > > +++ b/drivers/virtio/virtio_pci_common.c
> > > @@ -464,6 +464,55 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned 
> > > nvqs,
> > >   return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
> > >  }
> > >
> > > +#define VQ_IS_DELETED(vp_dev, idx) ((unsigned long)vp_dev->vqs[idx] & 1)
> > > +#define VQ_RESET_MSIX_VEC(vp_dev, idx) ((unsigned long)vp_dev->vqs[idx] 
> > > >> 2)
> > > +#define VQ_RESET_MARK(msix_vec) ((void *)(long)((msix_vec << 2) + 1))
> > > +
> > > +void vp_del_reset_vq(struct virtio_device *vdev, u16 queue_index)
> > > +{
> > > + struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> > > + struct virtio_pci_vq_info *info;
> > > + u16 msix_vec;
> > > +
> > > + info = vp_dev->vqs[queue_index];
> > > +
> > > + msix_vec = info->msix_vector;
> > > +
> > > + /* delete vq */
> > > + vp_del_vq(info->vq);
> > > +
> > > + /* Mark the vq has been deleted, and save the msix_vec. */
> > > + vp_dev->vqs[queue_index] = VQ_RESET_MARK(msix_vec);
> > > +}
> > > +
> > > +struct virtqueue *vp_enable_reset_vq(struct virtio_device *vdev,
> > > +  int queue_index,
> > > +  vq_callback_t *callback,
> > > +  const char *name,
> > > +  const bool ctx)
> > > +{
> > > + struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> > > + struct virtqueue *vq;
> > > + u16 msix_vec;
> > > +
> > > + if (!VQ_IS_DELETED(vp_dev, queue_index))
> > > + return ERR_PTR(-EPERM);
> > > +
> > > + msix_vec = VQ_RESET_MSIX_VEC(vp_dev, queue_index);
> > > +
> > > + if (vp_dev->intx_enabled)
> > > + vq = vp_setup_vq(vdev, queue_index, callback, name, ctx,
> > > +  VIRTIO_MSI_NO_VECTOR);
> > > + else
> > > + vq = vp_enable_vq_msix(vdev, queue_index, callback, name, ctx,
> > > +msix_vec);
> > > +
> > > + if (IS_ERR(vq))
> > > + vp_dev->vqs[queue_index] = VQ_RESET_MARK(msix_vec);
> > > +
> > > + return vq;
> > > +}
> > > +
> > >  const char *vp_bus_name(struct virtio_device *vdev)
> > >  {
> > >   struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> > > diff --git a/drivers/virtio/virtio_pci_common.h 
> > > b/drivers/virtio/virtio_pci_common.h
> > > index 23f6c5c678d5..96c13b1398f8 100644
> > > --- a/drivers/virtio/virtio_pci_common.h
> > > +++ b/drivers/virtio/virtio_pci_common.h
> > > @@ -115,6 +115,10 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned 
> > > nvqs,
> > >   struct virtqueue *vqs[], vq_callback_t *callbacks[],
> > >   const char * const names[], const bool *ctx,
> > >   struct irq_affinity *desc);
> > > +void vp_del_res

Re: [PATCH v1] vhost: cache avail index in vhost_enable_notify()

2022-01-20 Thread Stefano Garzarella
On Fri, Jan 14, 2022 at 2:40 PM Michael S. Tsirkin  wrote:
>
> On Fri, Jan 14, 2022 at 02:38:16PM +0100, Stefano Garzarella wrote:
> > On Fri, Jan 14, 2022 at 07:45:35AM -0500, Michael S. Tsirkin wrote:
> > > On Fri, Jan 14, 2022 at 10:05:08AM +0100, Stefano Garzarella wrote:
> > > > In vhost_enable_notify() we enable the notifications and we read
> > > > the avail index to check if new buffers have become available in
> > > > the meantime.
> > > >
> > > > We are not caching the avail index, so when the device will call
> > > > vhost_get_vq_desc(), it will find the old value in the cache and
> > > > it will read the avail index again.
> > > >
> > > > It would be better to refresh the cache every time we read avail
> > > > index, so let's change vhost_enable_notify() caching the value in
> > > > `avail_idx` and compare it with `last_avail_idx` to check if there
> > > > are new buffers available.
> > > >
> > > > Anyway, we don't expect a significant performance boost because
> > > > the above path is not very common, indeed vhost_enable_notify()
> > > > is often called with unlikely(), expecting that avail index has
> > > > not been updated.
> > > >
> > > > Signed-off-by: Stefano Garzarella 
> > >
> > > ... and can in theory even hurt due to an extra memory write.
> > > So ... performance test results pls?
> >
> > Right, could be.
> >
> > I'll run some perf test with vsock, about net, do you have a test suite or
> > common step to follow to test it?
> >
> > Thanks,
> > Stefano
>
> You can use the vhost test as a unit test as well.

Thanks for the advice, I did indeed use it!

I run virtio_test (with vhost_test.ko) using 64 as batch to increase the 
chance of the path being taken. (I changed bufs=0x100 in 
virtio_test.c to increase the duration).

I used `perf stat` to take some numbers, running this command:

   taskset -c 2 perf stat -r 10 --log-fd 1 -- ./virtio_test --batch=64

- Linux v5.16 without the patch applied

 Performance counter stats for './virtio_test --batch=64' (10 runs):

          2,791.70 msec task-clock            #    0.996 CPUs utilized    ( +-  0.36% )
                23      context-switches      #    8.209 /sec             ( +-  2.75% )
                 0      cpu-migrations        #    0.000 /sec
                79      page-faults           #   28.195 /sec             ( +-  0.41% )
     7,249,926,989      cycles                #    2.587 GHz              ( +-  0.36% )
     7,711,999,656      instructions          #    1.06  insn per cycle   ( +-  1.08% )
     1,838,436,806      branches              #  656.134 M/sec            ( +-  1.44% )
         3,055,439      branch-misses         #    0.17% of all branches  ( +-  6.22% )

            2.8024 +- 0.0100 seconds time elapsed  ( +-  0.36% )

- Linux v5.16 with this patch applied

 Performance counter stats for './virtio_test --batch=64' (10 runs):

          2,753.36 msec task-clock            #    0.998 CPUs utilized    ( +-  0.20% )
                24      context-switches      #    8.699 /sec             ( +-  2.86% )
                 0      cpu-migrations        #    0.000 /sec
                76      page-faults           #   27.545 /sec             ( +-  0.56% )
     7,150,358,721      cycles                #    2.592 GHz              ( +-  0.20% )
     7,420,639,950      instructions          #    1.04  insn per cycle   ( +-  0.76% )
     1,745,759,193      branches              #  632.730 M/sec            ( +-  1.03% )
         3,022,508      branch-misses         #    0.17% of all branches  ( +-  3.24% )

           2.75952 +- 0.00561 seconds time elapsed  ( +-  0.20% )


The difference seems minimal with a slight improvement.

To try to stress the patch more, I modified vhost_test.ko to call 
vhost_enable_notify()/vhost_disable_notify() on every cycle when calling 
vhost_get_vq_desc():

- Linux v5.16 modified without the patch applied

 Performance counter stats for './virtio_test --batch=64' (10 runs):

          4,126.66 msec task-clock            #    1.006 CPUs utilized    ( +-  0.25% )
                28      context-switches      #    6.826 /sec             ( +-  3.41% )
                 0      cpu-migrations        #    0.000 /sec
                85      page-faults           #   20.721 /sec             ( +-  0.44% )
    10,716,808,883      cycles                #    2.612 GHz              ( +-  0.25% )
    11,804,381,462      instructions          #    1.11  insn per cycle   ( +-  0.86% )
     3,138,813,438      branches              #  765.153 M/sec            ( +-  1.03% )
        11,286,860      branch-misses         #    0.35% of all branches  ( +-  1.23% )

            4.1027 +- 0.0103 seconds time elapsed  ( +-  0.25% )

-
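For reference, the change under test is essentially the tail of
vhost_enable_notify() caching what it just read (a sketch of the idea,
assuming the vhost_get_avail_idx() helper; not the posted diff):

    r = vhost_get_avail_idx(vq, &avail_idx);
    if (r) {
            vq_err(vq, "Failed to check avail idx at %p: %d\n",
                   &vq->avail->idx, r);
            return false;
    }

    /* refresh the cache so a later vhost_get_vq_desc() can skip the read */
    vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

    return vq->avail_idx != vq->last_avail_idx;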

Re: [PATCH v1] vhost: cache avail index in vhost_enable_notify()

2022-01-20 Thread Michael S. Tsirkin
On Thu, Jan 20, 2022 at 04:08:39PM +0100, Stefano Garzarella wrote:
> On Fri, Jan 14, 2022 at 2:40 PM Michael S. Tsirkin  wrote:
> >
> > On Fri, Jan 14, 2022 at 02:38:16PM +0100, Stefano Garzarella wrote:
> > > On Fri, Jan 14, 2022 at 07:45:35AM -0500, Michael S. Tsirkin wrote:
> > > > On Fri, Jan 14, 2022 at 10:05:08AM +0100, Stefano Garzarella wrote:
> > > > > In vhost_enable_notify() we enable the notifications and we read
> > > > > the avail index to check if new buffers have become available in
> > > > > the meantime.
> > > > >
> > > > > We are not caching the avail index, so when the device will call
> > > > > vhost_get_vq_desc(), it will find the old value in the cache and
> > > > > it will read the avail index again.
> > > > >
> > > > > It would be better to refresh the cache every time we read avail
> > > > > index, so let's change vhost_enable_notify() caching the value in
> > > > > `avail_idx` and compare it with `last_avail_idx` to check if there
> > > > > are new buffers available.
> > > > >
> > > > > Anyway, we don't expect a significant performance boost because
> > > > > the above path is not very common, indeed vhost_enable_notify()
> > > > > is often called with unlikely(), expecting that avail index has
> > > > > not been updated.
> > > > >
> > > > > Signed-off-by: Stefano Garzarella 
> > > >
> > > > ... and can in theory even hurt due to an extra memory write.
> > > > So ... performance test results pls?
> > >
> > > Right, could be.
> > >
> > > I'll run some perf test with vsock, about net, do you have a test suite or
> > > common step to follow to test it?
> > >
> > > Thanks,
> > > Stefano
> >
> > You can use the vhost test as a unit test as well.
> 
> Thanks for the advice, I did indeed use it!
> 
> I run virtio_test (with vhost_test.ko) using 64 as batch to increase the 
> chance of the path being taken. (I changed bufs=0x100 in 
> virtio_test.c to increase the duration).
> 
> I used `perf stat` to take some numbers, running this command:
> 
>taskset -c 2 perf stat -r 10 --log-fd 1 -- ./virtio_test --batch=64
> 
> - Linux v5.16 without the patch applied
> 
>  Performance counter stats for './virtio_test --batch=64' (10 runs):
> 
>           2,791.70 msec task-clock            #    0.996 CPUs utilized    ( +-  0.36% )
>                 23      context-switches      #    8.209 /sec             ( +-  2.75% )
>                  0      cpu-migrations        #    0.000 /sec
>                 79      page-faults           #   28.195 /sec             ( +-  0.41% )
>      7,249,926,989      cycles                #    2.587 GHz              ( +-  0.36% )
>      7,711,999,656      instructions          #    1.06  insn per cycle   ( +-  1.08% )
>      1,838,436,806      branches              #  656.134 M/sec            ( +-  1.44% )
>          3,055,439      branch-misses         #    0.17% of all branches  ( +-  6.22% )
> 
>             2.8024 +- 0.0100 seconds time elapsed  ( +-  0.36% )
> 
> - Linux v5.16 with this patch applied
> 
>  Performance counter stats for './virtio_test --batch=64' (10 runs):
> 
>           2,753.36 msec task-clock            #    0.998 CPUs utilized    ( +-  0.20% )
>                 24      context-switches      #    8.699 /sec             ( +-  2.86% )
>                  0      cpu-migrations        #    0.000 /sec
>                 76      page-faults           #   27.545 /sec             ( +-  0.56% )
>      7,150,358,721      cycles                #    2.592 GHz              ( +-  0.20% )
>      7,420,639,950      instructions          #    1.04  insn per cycle   ( +-  0.76% )
>      1,745,759,193      branches              #  632.730 M/sec            ( +-  1.03% )
>          3,022,508      branch-misses         #    0.17% of all branches  ( +-  3.24% )
> 
>            2.75952 +- 0.00561 seconds time elapsed  ( +-  0.20% )
> 
> 
> The difference seems minimal with a slight improvement.
> 
> To try to stress the patch more, I modified vhost_test.ko to call 
> vhost_enable_notify()/vhost_disable_notify() on every cycle when calling 
> vhost_get_vq_desc():
> 
> - Linux v5.16 modified without the patch applied
> 
>  Performance counter stats for './virtio_test --batch=64' (10 runs):
> 
>           4,126.66 msec task-clock            #    1.006 CPUs utilized    ( +-  0.25% )
>                 28      context-switches      #    6.826 /sec             ( +-  3.41% )
>                  0      cpu-migrations        #    0.000 /sec
>                 85      page-faults           #   20.721 /sec             ( +-  0.44% )
>     10,716,808,883      cycles                #    2.612 GHz              ( +-  0.25% )
>     11,804,381,462      instructions          #    1.11  insn per cycle

Re: [PATCH v9 09/11] firmware: arm_scmi: Add atomic mode support to virtio transport

2022-01-20 Thread Michael S. Tsirkin
On Thu, Jan 20, 2022 at 08:09:56PM +0100, Peter Hilber wrote:
> On 19.01.22 13:23, Cristian Marussi wrote:
> > On Tue, Jan 18, 2022 at 03:21:03PM +0100, Peter Hilber wrote:
> >> On 21.12.21 15:00, Cristian Marussi wrote:
> >>> Add support for .mark_txdone and .poll_done transport operations to SCMI
> >>> VirtIO transport as pre-requisites to enable atomic operations.
> >>>
> >>> Add a Kernel configuration option to enable SCMI VirtIO transport polling
> >>> and atomic mode for selected SCMI transactions while leaving it default
> >>> disabled.
> >>>
> >>
> >> Hi Cristian,
> >>
> >> thanks for the update. I have some more remarks inline below.
> >>
> > 
> > Hi Peter,
> > 
> > thanks for your review, much appreciated, please see my replies online.
> > 
> >> My impression is that the virtio core does not expose helper functions 
> >> suitable
> >> to busy-poll for used buffers. But changing this might not be difficult. 
> >> Maybe
> >> more_used() from virtio_ring.c could be exposed via a wrapper?
> >>
> > 
> > While I definitely agree that the virtio core support for polling is far 
> > from
> > ideal, some support is provided and my point was at first to try implement 
> > SCMI
> > virtio polling leveraging what we have now in the core and see if it was 
> > attainable
> > (indeed I tried early in this series to avoid as a whole to have to support 
> > polling
> > at the SCMI transport layer to attain SCMI cmds atomicity..but that was an 
> > ill
> > attempt that led nowhere good...)
> > 
> > Btw, I was planning to post a new series next week (after merge-windows) 
> > with some
> > fixes I did already, at this point I'll include also some fixes derived
> > from some of your remarks.
> > 
> >> Best regards,
> >>
> >> Peter
> >>
> [snip]
> >>> + *
> >>> + * Return: True once polling has successfully completed.
> >>> + */
> >>> +static bool virtio_poll_done(struct scmi_chan_info *cinfo,
> >>> +  struct scmi_xfer *xfer)
> >>> +{
> >>> + bool pending, ret = false;
> >>> + unsigned int length, any_prefetched = 0;
> >>> + unsigned long flags;
> >>> + struct scmi_vio_msg *next_msg, *msg = xfer->priv;
> >>> + struct scmi_vio_channel *vioch = cinfo->transport_info;
> >>> +
> >>> + if (!msg)
> >>> + return true;
> >>> +
> >>> + spin_lock_irqsave(&msg->poll_lock, flags);
> >>> + /* Processed already by other polling loop on another CPU ? */
> >>> + if (msg->poll_idx == VIO_MSG_POLL_DONE) {
> >>> + spin_unlock_irqrestore(&msg->poll_lock, flags);
> >>> + return true;
> >>> + }
> >>> +
> >>> + /* Has cmdq index moved at all ? */
> >>> + pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
> >>
> >> In my understanding, the polling comparison could still be subject to the 
> >> ABA
> >> problem when exactly 2**16 messages have been marked as used since
> >> msg->poll_idx was set (unlikely scenario, granted).
> >>
> >> I think this would be a lot simpler if the virtio core exported some
> >> concurrency-safe helper function for such polling (similar to more_used() 
> >> from
> >> virtio_ring.c), as discussed at the top.
> > 
> > So this is the main limitation indeed of the current implementation, I
> > cannot distinguish if there was an exact full wrap and I'm reading the same
> > last_idx as before but a whopping 2**16 messages have instead gone 
> > through...
> > 
> > The tricky part seems to me here that even introducing dedicated helpers
> > for polling in order to account for such wrapping (similar to more_used())
> > those would be based by current VirtIO spec on a single bit wrap counter,
> > so how do you discern if 2 whole wraps have happened (even more unlikely..) 
> > ?
> > 
> > Maybe I'm missing something though...
> > 
> 
> In my understanding, there is no need to keep track of the old state. We
> actually only want to check whether the device has marked any buffers as 
> `used'
> which we did not retrieve yet via virtqueue_get_buf_ctx().
> 
> This is what more_used() checks in my understanding. One would just need to
> translate the external `struct virtqueue' param to the virtio_ring.c internal
> representation `struct vring_virtqueue' and then call `more_used()'.
> 
> There would be no need to keep `poll_idx` then.
> 
> Best regards,
> 
> Peter

Not really, I don't think so.

There's no magic in more_used. No synchronization happens.
more_used is exactly like virtqueue_poll except
you get to maintain your own index.

As it is, it is quite possible to read the cached index,
then another thread makes 2^16 bufs available, then device
uses them all, and presto you get a false positive.

I guess we can play with memory barriers such that cache
read happens after the index read - but it seems that
will just lead to the same wrap around problem
in reverse. So IIUC it's quite a bit more involved than
just translating structures.

And yes, a more_used like API would remove the need to pass
the index around, but it will also obscure the fact that
there's internal state he
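For context on the API being discussed: virtqueue_poll() compares against an
opaque snapshot returned by virtqueue_enable_cb_prepare(), which is why the
16-bit wrap matters. A typical busy-poll loop looks roughly like this (a
sketch, not code from the SCMI series):

    unsigned int opaque = virtqueue_enable_cb_prepare(vq);
    unsigned int len;
    void *buf;

    /* spin until the snapshot no longer matches the device's used index */
    while (!virtqueue_poll(vq, opaque))
            cpu_relax();

    virtqueue_disable_cb(vq);
    buf = virtqueue_get_buf(vq, &len);

    /* if exactly 2^16 buffers were used between the two calls above, the
     * comparison wraps around -- the ABA concern raised in this thread */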

[PATCH 0/3] Introduce akcipher service for virtio-crypto

2022-01-20 Thread zhenwei pi
Introduce akcipher service, implement RSA algorithm, and a minor fix.

zhenwei pi (3):
  virtio_crypto: Introduce VIRTIO_CRYPTO_NOSPC
  virtio-crypto: introduce akcipher service
  virtio-crypto: implement RSA algorithm

 drivers/crypto/virtio/Makefile|   1 +
 .../virtio/virtio_crypto_akcipher_algo.c  | 584 ++
 drivers/crypto/virtio/virtio_crypto_common.h  |   3 +
 drivers/crypto/virtio/virtio_crypto_core.c|   6 +-
 drivers/crypto/virtio/virtio_crypto_mgr.c |  11 +
 include/uapi/linux/virtio_crypto.h|  98 ++-
 6 files changed, 693 insertions(+), 10 deletions(-)
 create mode 100644 drivers/crypto/virtio/virtio_crypto_akcipher_algo.c

-- 
2.25.1



[PATCH 1/3] virtio_crypto: Introduce VIRTIO_CRYPTO_NOSPC

2022-01-20 Thread zhenwei pi
Based on the latest virtio crypto spec, define VIRTIO_CRYPTO_NOSPC.

Signed-off-by: zhenwei pi 
---
 include/uapi/linux/virtio_crypto.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/include/uapi/linux/virtio_crypto.h 
b/include/uapi/linux/virtio_crypto.h
index a03932f10565..1166a49084b0 100644
--- a/include/uapi/linux/virtio_crypto.h
+++ b/include/uapi/linux/virtio_crypto.h
@@ -408,6 +408,7 @@ struct virtio_crypto_op_data_req {
#define VIRTIO_CRYPTO_BADMSG    2
 #define VIRTIO_CRYPTO_NOTSUPP   3
 #define VIRTIO_CRYPTO_INVSESS   4 /* Invalid session id */
+#define VIRTIO_CRYPTO_NOSPC 5 /* no free session ID */
 
 /* The accelerator hardware is ready */
 #define VIRTIO_CRYPTO_S_HW_READY  (1 << 0)
-- 
2.25.1



[PATCH 2/3] virtio-crypto: introduce akcipher service

2022-01-20 Thread zhenwei pi
Introduce asymmetric service definition, asymmetric operations and
several well known algorithms.

Co-developed-by: lei he 
Signed-off-by: lei he 
Signed-off-by: zhenwei pi 
---
 include/uapi/linux/virtio_crypto.h | 99 +++---
 1 file changed, 89 insertions(+), 10 deletions(-)

diff --git a/include/uapi/linux/virtio_crypto.h 
b/include/uapi/linux/virtio_crypto.h
index 1166a49084b0..050578d61d85 100644
--- a/include/uapi/linux/virtio_crypto.h
+++ b/include/uapi/linux/virtio_crypto.h
@@ -33,10 +33,11 @@
 #include 
 
 
-#define VIRTIO_CRYPTO_SERVICE_CIPHER 0
-#define VIRTIO_CRYPTO_SERVICE_HASH   1
-#define VIRTIO_CRYPTO_SERVICE_MAC2
-#define VIRTIO_CRYPTO_SERVICE_AEAD   3
+#define VIRTIO_CRYPTO_SERVICE_CIPHER   0
+#define VIRTIO_CRYPTO_SERVICE_HASH 1
+#define VIRTIO_CRYPTO_SERVICE_MAC  2
+#define VIRTIO_CRYPTO_SERVICE_AEAD 3
+#define VIRTIO_CRYPTO_SERVICE_AKCIPHER 4
 
 #define VIRTIO_CRYPTO_OPCODE(service, op)   (((service) << 8) | (op))
 
@@ -57,6 +58,10 @@ struct virtio_crypto_ctrl_header {
   VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02)
 #define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \
   VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03)
+#define VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION \
+  VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x04)
+#define VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION \
+  VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x05)
__le32 opcode;
__le32 algo;
__le32 flag;
@@ -180,6 +185,57 @@ struct virtio_crypto_aead_create_session_req {
__u8 padding[32];
 };
 
+struct virtio_crypto_rsa_session_para {
+#define VIRTIO_CRYPTO_RSA_RAW_PADDING   0
+#define VIRTIO_CRYPTO_RSA_PKCS1_PADDING 1
+   __le32 padding_algo;
+
+#define VIRTIO_CRYPTO_RSA_NO_HASH   0
+#define VIRTIO_CRYPTO_RSA_MD2   1
+#define VIRTIO_CRYPTO_RSA_MD3   2
+#define VIRTIO_CRYPTO_RSA_MD4   3
+#define VIRTIO_CRYPTO_RSA_MD5   4
+#define VIRTIO_CRYPTO_RSA_SHA1  5
+#define VIRTIO_CRYPTO_RSA_SHA256    6
+#define VIRTIO_CRYPTO_RSA_SHA384    7
+#define VIRTIO_CRYPTO_RSA_SHA512    8
+#define VIRTIO_CRYPTO_RSA_SHA224    9
+   __le32 hash_algo;
+};
+
+struct virtio_crypto_ecdsa_session_para {
+#define VIRTIO_CRYPTO_CURVE_UNKNOWN   0
+#define VIRTIO_CRYPTO_CURVE_NIST_P192 1
+#define VIRTIO_CRYPTO_CURVE_NIST_P224 2
+#define VIRTIO_CRYPTO_CURVE_NIST_P256 3
+#define VIRTIO_CRYPTO_CURVE_NIST_P384 4
+#define VIRTIO_CRYPTO_CURVE_NIST_P521 5
+   __le32 curve_id;
+};
+
+struct virtio_crypto_akcipher_session_para {
+#define VIRTIO_CRYPTO_NO_AKCIPHER    0
+#define VIRTIO_CRYPTO_AKCIPHER_RSA   1
+#define VIRTIO_CRYPTO_AKCIPHER_DSA   2
+#define VIRTIO_CRYPTO_AKCIPHER_ECDSA 3
+   __le32 algo;
+
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC  1
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE 2
+   __le32 keytype;
+   __le32 keylen;
+
+   union {
+   struct virtio_crypto_rsa_session_para rsa;
+   struct virtio_crypto_ecdsa_session_para ecdsa;
+   } u;
+};
+
+struct virtio_crypto_akcipher_create_session_req {
+   struct virtio_crypto_akcipher_session_para para;
+   __u8 padding[36];
+};
+
 struct virtio_crypto_alg_chain_session_para {
 #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER  1
 #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH  2
@@ -247,6 +303,8 @@ struct virtio_crypto_op_ctrl_req {
mac_create_session;
struct virtio_crypto_aead_create_session_req
aead_create_session;
+   struct virtio_crypto_akcipher_create_session_req
+   akcipher_create_session;
struct virtio_crypto_destroy_session_req
destroy_session;
__u8 padding[56];
@@ -266,6 +324,14 @@ struct virtio_crypto_op_header {
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00)
 #define VIRTIO_CRYPTO_AEAD_DECRYPT \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_ENCRYPT \
+   VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x00)
+#define VIRTIO_CRYPTO_AKCIPHER_DECRYPT \
+   VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_SIGN \
+   VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x02)
+#define VIRTIO_CRYPTO_AKCIPHER_VERIFY \
+   VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x03)
__le32 opcode;
/* algo should be service-specific algorithms */
__le32 algo;
@@ -390,6 +456,16 @@ struct virtio_crypto_aead_data_req {
__u8 padding[32];
 };
 
+struct virtio_crypto_akcipher_para {
+   __le32 src_data_len;
+   __le32 dst_data_len;
+};
+
+struct virtio_crypto_akcipher_data_req {
+   struct virtio_crypto_akcipher_para para;
+   __u8 padding[40];
+};
+
 /* The request of the data virtqueue's packet */
 struct virtio_crypto_op_data_req {
struct virt
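To illustrate how a driver might fill these control-plane structures when
creating an RSA session (a hypothetical example based only on the definitions
above; key_der_len stands in for the length of the caller's DER-encoded key):

    struct virtio_crypto_akcipher_create_session_req req = {};

    req.para.algo    = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
    req.para.keytype = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE);
    req.para.keylen  = cpu_to_le32(key_der_len);
    req.para.u.rsa.padding_algo = cpu_to_le32(VIRTIO_CRYPTO_RSA_PKCS1_PADDING);
    req.para.u.rsa.hash_algo    = cpu_to_le32(VIRTIO_CRYPTO_RSA_SHA1);

    /* the request travels inside virtio_crypto_op_ctrl_req (field
     * akcipher_create_session) with opcode
     * VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION in the ctrl header; the raw key
     * bytes follow as a separate buffer on the control queue */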

[PATCH 3/3] virtio-crypto: implement RSA algorithm

2022-01-20 Thread zhenwei pi
Support rsa & pkcs1pad(rsa,sha1) with priority 150.

Test with QEMU built-in backend, it works fine.
1, The self-test framework of crypto layer works fine in guest kernel
2, Test with Linux guest (with asym support); the following script tests
(note that pkey_XXX is supported only in a newer version of keyutils):
  - both public key & private key
  - create/close session
  - encrypt/decrypt/sign/verify basic driver operation
  - also test with kernel crypto layer(pkey add/query)

All the cases work fine.

rm -rf *.der *.pem *.pfx
modprobe pkcs8_key_parser # if CONFIG_PKCS8_PRIVATE_KEY_PARSER=m
rm -rf /tmp/data
dd if=/dev/random of=/tmp/data count=1 bs=226

openssl req -nodes -x509 -newkey rsa:2048 -keyout key.pem -out cert.pem -subj 
"/C=CN/ST=BJ/L=HD/O=qemu/OU=dev/CN=qemu/emailAddress=q...@qemu.org"
openssl pkcs8 -in key.pem -topk8 -nocrypt -outform DER -out key.der
openssl x509 -in cert.pem -inform PEM -outform DER -out cert.der

PRIV_KEY_ID=`cat key.der | keyctl padd asymmetric test_priv_key @s`
echo "priv key id = "$PRIV_KEY_ID
PUB_KEY_ID=`cat cert.der | keyctl padd asymmetric test_pub_key @s`
echo "pub key id = "$PUB_KEY_ID

keyctl pkey_query $PRIV_KEY_ID 0
keyctl pkey_query $PUB_KEY_ID 0

echo "Enc with priv key..."
keyctl pkey_encrypt $PRIV_KEY_ID 0 /tmp/data enc=pkcs1 >/tmp/enc.priv
echo "Dec with pub key..."
keyctl pkey_decrypt $PRIV_KEY_ID 0 /tmp/enc.priv enc=pkcs1 >/tmp/dec
cmp /tmp/data /tmp/dec

echo "Sign with priv key..."
keyctl pkey_sign $PRIV_KEY_ID 0 /tmp/data enc=pkcs1 hash=sha1 > /tmp/sig
echo "Verify with pub key..."
keyctl pkey_verify $PRIV_KEY_ID 0 /tmp/data /tmp/sig enc=pkcs1 hash=sha1

echo "Enc with pub key..."
keyctl pkey_encrypt $PUB_KEY_ID 0 /tmp/data enc=pkcs1 >/tmp/enc.pub
echo "Dec with priv key..."
keyctl pkey_decrypt $PRIV_KEY_ID 0 /tmp/enc.pub enc=pkcs1 >/tmp/dec
cmp /tmp/data /tmp/dec

echo "Verify with pub key..."
keyctl pkey_verify $PUB_KEY_ID 0 /tmp/data /tmp/sig enc=pkcs1 hash=sha1

Co-developed-by: lei he 
Signed-off-by: lei he 
Signed-off-by: zhenwei pi 
---
 drivers/crypto/virtio/Makefile|   1 +
 .../virtio/virtio_crypto_akcipher_algo.c  | 584 ++
 drivers/crypto/virtio/virtio_crypto_common.h  |   3 +
 drivers/crypto/virtio/virtio_crypto_core.c|   6 +-
 drivers/crypto/virtio/virtio_crypto_mgr.c |  11 +
 5 files changed, 604 insertions(+), 1 deletion(-)
 create mode 100644 drivers/crypto/virtio/virtio_crypto_akcipher_algo.c

diff --git a/drivers/crypto/virtio/Makefile b/drivers/crypto/virtio/Makefile
index cbffa135..06b23c5e784e 100644
--- a/drivers/crypto/virtio/Makefile
+++ b/drivers/crypto/virtio/Makefile
@@ -2,5 +2,6 @@
 obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio_crypto.o
 virtio_crypto-objs := \
virtio_crypto_algs.o \
+   virtio_crypto_akcipher_algo.o \
virtio_crypto_mgr.o \
virtio_crypto_core.o
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algo.c 
b/drivers/crypto/virtio/virtio_crypto_akcipher_algo.c
new file mode 100644
index ..eebccf71b2fa
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algo.c
@@ -0,0 +1,584 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+ /* Asymmetric algorithms supported by virtio crypto device
+  *
+  * Authors: zhenwei pi 
+  *  lei he 
+  *
+  * Copyright 2022 Bytedance CO., LTD.
+  */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include "virtio_crypto_common.h"
+
+struct virtio_crypto_rsa_ctx {
+   MPI n;
+};
+
+struct virtio_crypto_akcipher_ctx {
+   struct crypto_engine_ctx enginectx;
+   struct virtio_crypto *vcrypto;
+   struct crypto_akcipher *tfm;
+   bool session_valid;
+   __u64 session_id;
+   union {
+   struct virtio_crypto_rsa_ctx rsa_ctx;
+   };
+};
+
+struct virtio_crypto_akcipher_request {
+   struct virtio_crypto_request base;
+   struct virtio_crypto_akcipher_ctx *akcipher_ctx;
+   struct akcipher_request *akcipher_req;
+   void *src_buf;
+   void *dst_buf;
+   uint32_t opcode;
+};
+
+struct virtio_crypto_akcipher_algo {
+   uint32_t algonum;
+   uint32_t service;
+   unsigned int active_devs;
+   struct akcipher_alg algo;
+};
+
+static DEFINE_MUTEX(algs_lock);
+
+static void virtio_crypto_akcipher_finalize_req(
+   struct virtio_crypto_akcipher_request *vc_akcipher_req,
+   struct akcipher_request *req, int err)
+{
+   virtcrypto_clear_request(&vc_akcipher_req->base);
+
+   crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, 
req, err);
+}
+
+static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request 
*vc_req, int len)
+{
+   struct virtio_crypto_akcipher_request *vc_akcipher_req =
+   container_of(vc_req, struct virtio_crypto_akcipher_request, 
base);
+   struct akcipher_request *akcipher_req;
+   int error;
+
+   switch (vc_req->status) {
+   case VIRTIO_CRYPTO_OK:
+  

Re: [PATCH v2 07/12] virtio: queue_reset: pci: support VIRTIO_F_RING_RESET

2022-01-20 Thread Xuan Zhuo
On Thu, 20 Jan 2022 10:03:45 -0500, Michael S. Tsirkin  wrote:
> On Thu, Jan 20, 2022 at 07:46:20PM +0800, Xuan Zhuo wrote:
> > On Thu, 20 Jan 2022 05:55:14 -0500, Michael S. Tsirkin  
> > wrote:
> > > On Thu, Jan 20, 2022 at 02:42:58PM +0800, Xuan Zhuo wrote:
> > > > This patch implements virtio pci support for QUEUE RESET.
> > > >
> > > > Performing reset on a queue is divided into two steps:
> > > >
> > > > 1. reset_vq: reset one vq
> > > > 2. enable_reset_vq: re-enable the reset queue
> > > >
> > > > In the first step, these tasks will be completed:
> > > >1. notify the hardware queue to reset
> > > >2. recycle the buffer from vq
> > > >3. delete the vq
> > > >
> > > > When a vq is deleted, vp_del_vq() is called to release all of its memory.
> > > > This does not affect the later teardown of the remaining vqs, because that
> > > > walks the device's list of vqs, and the reset vq has already been removed
> > > > from that list.
> > > >
> > > > Deleting the vq releases both its info structure and the vq itself, so I
> > > > save msix_vec in vp_dev->vqs[queue_index]. When the queue is re-enabled,
> > > > the saved msix_vec can be reused, and intx_enabled determines which method
> > > > is used to enable the queue.
> > > >
> > > > Signed-off-by: Xuan Zhuo 
> > >
> > > There's something I don't understand here. It looks like
> > > you assume that when you reset a queue, you also
> > > reset the mapping from queue to event vector.
> > > The spec does not say it should, and I don't think it's
> > > useful to extend spec to do it - we already have a simple
> > > way to tweak the mapping.
> > >
> >
> > Sorry, what is the already existing method you are referring to, I didn't 
> > find
> > it.
>
>
> Write 0xffff into the vector number.

I wonder if there is some misunderstanding here.

My purpose is to release the vq, and I hope the vector used by that vq can be
reused when the queue is re-enabled.

But the vector numbers are not assigned in a fixed order. When I re-enable the
queue, I don't know what its original vector number was, so I found a place to
save that number.

The queue reset I implemented is divided into the following steps:
1. notify the device to reset the queue
2. disable_irq()
3. free unused bufs
4. free irq, free vq, free info

The process of re-enabling is divided into the following steps:
1. get the vector number used by the original vq and re-setup the vq
2. enable vq
3. enable irq

If there is anything unreasonable please let me know.

Thanks.

>
> > I think you mean that we don't have to reset the event vector, I think you 
> > are
> > right.
> >
> >
> >
> > Thanks.
> >
> > > Avoid doing that, and things will be much easier, with no need
> > > to interact with a transport, won't they?
> > >
> > >
> > > > ---
> > > >  drivers/virtio/virtio_pci_common.c | 49 
> > > >  drivers/virtio/virtio_pci_common.h |  4 ++
> > > >  drivers/virtio/virtio_pci_modern.c | 73 ++
> > > >  3 files changed, 126 insertions(+)
> > > >
> > > > diff --git a/drivers/virtio/virtio_pci_common.c 
> > > > b/drivers/virtio/virtio_pci_common.c
> > > > index 5afe207ce28a..28b5ffde4621 100644
> > > > --- a/drivers/virtio/virtio_pci_common.c
> > > > +++ b/drivers/virtio/virtio_pci_common.c
> > > > @@ -464,6 +464,55 @@ int vp_find_vqs(struct virtio_device *vdev, 
> > > > unsigned nvqs,
> > > > return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
> > > >  }
> > > >
> > > > +#define VQ_IS_DELETED(vp_dev, idx) ((unsigned long)vp_dev->vqs[idx] & 
> > > > 1)
> > > > +#define VQ_RESET_MSIX_VEC(vp_dev, idx) ((unsigned 
> > > > long)vp_dev->vqs[idx] >> 2)
> > > > +#define VQ_RESET_MARK(msix_vec) ((void *)(long)((msix_vec << 2) + 1))
> > > > +
> > > > +void vp_del_reset_vq(struct virtio_device *vdev, u16 queue_index)
> > > > +{
> > > > +   struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> > > > +   struct virtio_pci_vq_info *info;
> > > > +   u16 msix_vec;
> > > > +
> > > > +   info = vp_dev->vqs[queue_index];
> > > > +
> > > > +   msix_vec = info->msix_vector;
> > > > +
> > > > +   /* delete vq */
> > > > +   vp_del_vq(info->vq);
> > > > +
> > > > +   /* Mark the vq has been deleted, and save the msix_vec. */
> > > > +   vp_dev->vqs[queue_index] = VQ_RESET_MARK(msix_vec);
> > > > +}
> > > > +
> > > > +struct virtqueue *vp_enable_reset_vq(struct virtio_device *vdev,
> > > > +int queue_index,
> > > > +vq_callback_t *callback,
> > > > +const char *name,
> > > > +const bool ctx)
> > > > +{
> > > > +   struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> > > > +   struct virtqueue *vq;
> > > > +   u16 msix_vec;
> > > > +
> > > > +   if (!VQ_IS_DELETED(vp_dev, queue_index))
> > > > +   return ERR_PTR(-EPERM);
> > > > +
> > > > +   m

Re: [PATCH 3/3] virtio-crypto: implement RSA algorithm

2022-01-20 Thread kernel test robot
Hi zhenwei,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on herbert-cryptodev-2.6/master]
[also build test WARNING on herbert-crypto-2.6/master linux/master linus/master 
v5.16 next-20220121]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:
https://github.com/0day-ci/linux/commits/zhenwei-pi/Introduce-akcipher-service-for-virtio-crypto/20220121-102730
base:   
https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git master
config: hexagon-randconfig-r026-20220120 
(https://download.01.org/0day-ci/archive/20220121/202201211427.tgczsuoo-...@intel.com/config)
compiler: clang version 14.0.0 (https://github.com/llvm/llvm-project 
d4baf3b1322b84816aa623d8e8cb45a49cb68b84)
reproduce (this is a W=1 build):
wget 
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O 
~/bin/make.cross
chmod +x ~/bin/make.cross
# 
https://github.com/0day-ci/linux/commit/fa1045d13dd16399ab0287c599719a977892cf05
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review 
zhenwei-pi/Introduce-akcipher-service-for-virtio-crypto/20220121-102730
git checkout fa1045d13dd16399ab0287c599719a977892cf05
# save the config file to linux build tree
mkdir build_dir
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 
O=build_dir ARCH=hexagon SHELL=/bin/bash drivers/crypto/virtio/

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot 

All warnings (new ones prefixed by >>):

>> drivers/crypto/virtio/virtio_crypto_akcipher_algo.c:276:5: warning: no 
>> previous prototype for function 'virtio_crypto_rsa_do_req' 
>> [-Wmissing-prototypes]
   int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq)
   ^
   drivers/crypto/virtio/virtio_crypto_akcipher_algo.c:276:1: note: declare 
'static' if the function is not intended to be used outside of this translation 
unit
   int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq)
   ^
   static 
   1 warning generated.


vim +/virtio_crypto_rsa_do_req +276 
drivers/crypto/virtio/virtio_crypto_akcipher_algo.c

   275  
 > 276  int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq)
   277  {
   278  struct akcipher_request *req = container_of(vreq, struct 
akcipher_request, base);
   279  struct virtio_crypto_akcipher_request *vc_akcipher_req = 
akcipher_request_ctx(req);
   280  struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
   281  struct virtio_crypto_akcipher_ctx *ctx = 
vc_akcipher_req->akcipher_ctx;
   282  struct virtio_crypto *vcrypto = ctx->vcrypto;
   283  struct data_queue *data_vq = vc_req->dataq;
   284  struct virtio_crypto_op_header *header;
   285  struct virtio_crypto_akcipher_data_req *akcipher_req;
   286  int ret;
   287  
   288  vc_req->sgs = NULL;
   289  vc_req->req_data = kzalloc_node(sizeof(*vc_req->req_data),
   290  GFP_KERNEL, dev_to_node(&vcrypto->vdev->dev));
   291  if (!vc_req->req_data)
   292  return -ENOMEM;
   293  
   294  /* build request header */
   295  header = &vc_req->req_data->header;
   296  header->opcode = cpu_to_le32(vc_akcipher_req->opcode);
   297  header->algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
   298  header->session_id = cpu_to_le64(ctx->session_id);
   299  
   300  /* build request akcipher data */
   301  akcipher_req = &vc_req->req_data->u.akcipher_req;
   302  akcipher_req->para.src_data_len = cpu_to_le32(req->src_len);
   303  akcipher_req->para.dst_data_len = cpu_to_le32(req->dst_len);
   304  
   305  ret = __virtio_crypto_akcipher_do_req(vc_akcipher_req, req, 
data_vq);
   306  if (ret < 0) {
   307  kfree_sensitive(vc_req->req_data);
   308  vc_req->req_data = NULL;
   309  return ret;
   310  }
   311  
   312  return 0;
   313  }
   314  
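The warning only asks for internal linkage; the fix is a one-word change along
these lines:

    -int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq)
    +static int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq)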

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-...@lists.01.org