Re: [RFC 1/4] vduse: Add the struct to save the vq reconnect info

2023-06-28 Thread Jason Wang
On Wed, Jun 28, 2023 at 2:59 PM Cindy Lu  wrote:
>
> From: Your Name 

It looks to me like your git is not properly configured.

>
> This struct is used to save the reconnect info; it records the
> info about the page that is allocated to hold the reconnect
> state.
>
> Signed-off-by: Cindy Lu 
> ---
>  drivers/vdpa/vdpa_user/vduse_dev.c | 7 +++
>  1 file changed, 7 insertions(+)
>
> diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
> index 26b7e29cb900..f845dc46b1db 100644
> --- a/drivers/vdpa/vdpa_user/vduse_dev.c
> +++ b/drivers/vdpa/vdpa_user/vduse_dev.c
> @@ -72,6 +72,12 @@ struct vduse_umem {
> struct page **pages;
> struct mm_struct *mm;
>  };
> +struct vdpa_reconnect_info {
> +   u32 index;
> +   phys_addr_t addr;
> +   unsigned long vaddr;
> +   phys_addr_t size;
> +};

Please add comments to explain each field. And I think this should be
a part of uAPI?

Thanks
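
For reference, a commented version could look something like the sketch
below; the field meanings are inferred from how the series uses them,
not taken from the author:

/* Sketch only: per-virtqueue reconnect bookkeeping */
struct vdpa_reconnect_info {
	/* index of the virtqueue this area belongs to */
	u32 index;
	/* physical address of the page shared with userspace */
	phys_addr_t addr;
	/* kernel virtual address of the same page */
	unsigned long vaddr;
	/* size of the area (currently a single page) */
	phys_addr_t size;
};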

>
>  struct vduse_dev {
> struct vduse_vdpa *vdev;
> @@ -106,6 +112,7 @@ struct vduse_dev {
> u32 vq_align;
> struct vduse_umem *umem;
> struct mutex mem_lock;
> +   struct vdpa_reconnect_info reconnect_info[64];
>  };
>
>  struct vduse_dev_msg {
> --
> 2.34.3
>


Re: [RFC 2/4] vduse: Add file operation for mmap

2023-06-28 Thread Jason Wang
On Wed, Jun 28, 2023 at 2:59 PM Cindy Lu  wrote:
>
> From: Your Name 
>
> Add the file operation for mmap; the userspace application will
> use this function to map the pages into userspace.

Please be specific in the log, e.g. why this is needed and what the main goal of this mmap is.

>
> Signed-off-by: Cindy Lu 
> ---
>  drivers/vdpa/vdpa_user/vduse_dev.c | 49 ++
>  1 file changed, 49 insertions(+)
>
> diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
> index f845dc46b1db..1b833bf0ae37 100644
> --- a/drivers/vdpa/vdpa_user/vduse_dev.c
> +++ b/drivers/vdpa/vdpa_user/vduse_dev.c
> @@ -1313,6 +1313,54 @@ static struct vduse_dev *vduse_dev_get_from_minor(int minor)
> return dev;
>  }
>
> +
> +static vm_fault_t vduse_vm_fault(struct vm_fault *vmf)
> +{
> +   struct vduse_dev *dev = vmf->vma->vm_file->private_data;
> +   struct vm_area_struct *vma = vmf->vma;
> +   u16 index = vma->vm_pgoff;
> +
> +   struct vdpa_reconnect_info *info;
> +   info = &dev->reconnect_info[index];
> +
> +   vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
> +   if (remap_pfn_range(vma, vmf->address & PAGE_MASK, PFN_DOWN(info->addr),
> +   PAGE_SIZE, vma->vm_page_prot))

I'm not sure this can work, e.g. do we want to use separate pages for
each virtqueue? (I think the answer is yes.)
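
One way to keep a separate page per virtqueue would be to pick the page
by the mmap offset in the fault handler and insert only that pfn. A
rough sketch, reusing the names from this series and assuming vm_pgoff
encodes the vq index:

static vm_fault_t vduse_vm_fault(struct vm_fault *vmf)
{
	struct vduse_dev *dev = vmf->vma->vm_file->private_data;
	/* one page per virtqueue, selected by the mmap offset */
	struct vdpa_reconnect_info *info =
		&dev->reconnect_info[vmf->vma->vm_pgoff];

	return vmf_insert_pfn(vmf->vma, vmf->address, PFN_DOWN(info->addr));
}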

> +   return VM_FAULT_SIGBUS;
> +   return VM_FAULT_NOPAGE;
> +}
> +
> +static const struct vm_operations_struct vduse_vm_ops = {
> +   .fault = vduse_vm_fault,
> +};
> +
> +static int vduse_mmap(struct file *file, struct vm_area_struct *vma)
> +{
> +   struct vduse_dev *dev = file->private_data;
> +   struct vdpa_reconnect_info *info;
> +   unsigned long index = vma->vm_pgoff;
> +
> +   if (vma->vm_end - vma->vm_start != PAGE_SIZE)
> +   return -EINVAL;
> +   if ((vma->vm_flags & VM_SHARED) == 0)
> +   return -EINVAL;
> +
> +   if (index > 65535)
> +   return -EINVAL;
> +
> +   info = &dev->reconnect_info[index];
> +   if (info->addr & (PAGE_SIZE - 1))
> +   return -EINVAL;
> +   if (vma->vm_end - vma->vm_start != info->size) {
> +   return -ENOTSUPP;
> +   }

How can userspace know the correct size (info->size) here?
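
If the convention ends up being one page per virtqueue with the vq
index encoded in the offset, the userspace side might look like the
hypothetical helper below (dev_fd and the size/offset convention are
assumptions that would need to be documented in the uAPI):

#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical helper: map the reconnect page of one virtqueue,
 * assuming one page per vq and vm_pgoff == vq index.
 */
static void *map_vq_reconnect_page(int dev_fd, unsigned int vq_index)
{
	long page_size = sysconf(_SC_PAGESIZE);

	return mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    dev_fd, (off_t)vq_index * page_size);
}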

> +
> +   vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

Why do you need VM_IO, VM_PFNMAP and VM_DONTDUMP here?

Thanks

> +   vma->vm_ops = &vduse_vm_ops;
> +
> +   return 0;
> +}
> +
>  static int vduse_dev_open(struct inode *inode, struct file *file)
>  {
> int ret;
> @@ -1345,6 +1393,7 @@ static const struct file_operations vduse_dev_fops = {
> .unlocked_ioctl = vduse_dev_ioctl,
> .compat_ioctl   = compat_ptr_ioctl,
> .llseek = noop_llseek,
> +   .mmap   = vduse_mmap,
>  };
>
>  static struct vduse_dev *vduse_dev_create(void)
> --
> 2.34.3
>


Re: [RFC 3/4] vduse: Add the function for get/free the mapp pages

2023-06-28 Thread Jason Wang
On Wed, Jun 28, 2023 at 2:59 PM Cindy Lu  wrote:
>
> From: Your Name 
>
> Add the functions to get/free the pages; this info will be
> saved in dev->reconnect_info.

I think this should be squashed into patch 2, otherwise it fixes a bug
that is introduced in patch 2?

>
> Signed-off-by: Cindy Lu 
> ---
>  drivers/vdpa/vdpa_user/vduse_dev.c | 35 ++
>  1 file changed, 35 insertions(+)
>
> diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
> index 1b833bf0ae37..3df1256eccb4 100644
> --- a/drivers/vdpa/vdpa_user/vduse_dev.c
> +++ b/drivers/vdpa/vdpa_user/vduse_dev.c
> @@ -1313,6 +1313,35 @@ static struct vduse_dev *vduse_dev_get_from_minor(int minor)
> return dev;
>  }
>
> +int vduse_get_vq_reconnnect(struct vduse_dev *dev, u16 idx)
> +{
> +   struct vdpa_reconnect_info *area;
> +   void *addr = (void *)get_zeroed_page(GFP_KERNEL);
> +
> +   area = &dev->reconnect_info[idx];
> +
> +   area->addr = virt_to_phys(addr);
> +   area->vaddr = (unsigned long)addr;
> +   area->size = PAGE_SIZE;
> +   area->index = idx;
> +
> +   return 0;
> +}
> +
> +int vduse_free_vq_reconnnect(struct vduse_dev *dev, u16 idx)
> +{
> +   struct vdpa_reconnect_info *area;
> +
> +   area = &dev->reconnect_info[idx];
> +   if ((area->size == PAGE_SIZE) && (area->addr != NULL)) {
> +   free_page(area->vaddr);
> +   area->size = 0;
> +   area->addr = 0;
> +   area->vaddr = 0;
> +   }
> +
> +   return 0;
> +}
>
>  static vm_fault_t vduse_vm_fault(struct vm_fault *vmf)
>  {
> @@ -1446,6 +1475,10 @@ static int vduse_destroy_dev(char *name)
> mutex_unlock(&dev->lock);
> return -EBUSY;
> }
> +   for (int i = 0; i < dev->vq_num; i++) {
> +
> +   vduse_free_vq_reconnnect(dev, i);
> +   }
> dev->connected = true;
> mutex_unlock(&dev->lock);
>
> @@ -1583,6 +1616,8 @@ static int vduse_create_dev(struct vduse_dev_config *config,
> INIT_WORK(&dev->vqs[i].kick, vduse_vq_kick_work);
> spin_lock_init(&dev->vqs[i].kick_lock);
> spin_lock_init(&dev->vqs[i].irq_lock);
> +
> +   vduse_get_vq_reconnnect(dev, i);

Can we delay the allocation until the fault?

Thanks
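
A minimal sketch of fault-time allocation, reusing the names from this
series (serialization against concurrent faults on the same vq is
omitted here):

static vm_fault_t vduse_vm_fault(struct vm_fault *vmf)
{
	struct vduse_dev *dev = vmf->vma->vm_file->private_data;
	struct vdpa_reconnect_info *info =
		&dev->reconnect_info[vmf->vma->vm_pgoff];

	/* allocate the reconnect page lazily, on the first fault */
	if (!info->vaddr) {
		unsigned long vaddr = get_zeroed_page(GFP_KERNEL);

		if (!vaddr)
			return VM_FAULT_OOM;
		info->vaddr = vaddr;
		info->addr = virt_to_phys((void *)vaddr);
		info->size = PAGE_SIZE;
	}

	return vmf_insert_pfn(vmf->vma, vmf->address, PFN_DOWN(info->addr));
}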

> }
>
> ret = idr_alloc(&vduse_idr, dev, 1, VDUSE_DEV_MAX, GFP_KERNEL);
> --
> 2.34.3
>


Re: [RFC 4/4] vduse: update the vq_info in ioctl

2023-06-28 Thread Jason Wang
On Wed, Jun 28, 2023 at 3:00 PM Cindy Lu  wrote:
>
> From: Your Name 
>
> in VDUSE_VQ_GET_INFO, driver will sync the last_avail_idx
> with reconnect info, I have olny test the split mode, so

Typo, should be "only".

> only use this here, will add more information later
>
> Signed-off-by: Cindy Lu 
> ---
>  drivers/vdpa/vdpa_user/vduse_dev.c | 16 
>  1 file changed, 16 insertions(+)
>
> diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
> index 3df1256eccb4..b8e453eac0ce 100644
> --- a/drivers/vdpa/vdpa_user/vduse_dev.c
> +++ b/drivers/vdpa/vdpa_user/vduse_dev.c
> @@ -141,6 +141,11 @@ static u32 allowed_device_id[] = {
> VIRTIO_ID_NET,
>  };
>
> +struct vhost_reconnect_vring {
> +   uint16_t last_avail_idx;
> +   bool avail_wrap_counter;
> +};

Should this belong to uAPI?
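
If it does move to the uAPI, it would presumably use fixed-width types
and explicit padding; a sketch of what that could look like (the layout
is an assumption, not part of the posted patch):

/* sketch of a possible uAPI version of this struct */
struct vhost_reconnect_vring {
	__u16 last_avail_idx;		/* last avail index seen by userspace */
	__u8  avail_wrap_counter;	/* wrap counter, for packed virtqueues */
	__u8  padding[5];		/* keep the layout explicit and stable */
};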

> +
>  static inline struct vduse_dev *vdpa_to_vduse(struct vdpa_device *vdpa)
>  {
> struct vduse_vdpa *vdev = container_of(vdpa, struct vduse_vdpa, vdpa);
> @@ -1176,6 +1181,17 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
> vq->state.split.avail_index;
>
> vq_info.ready = vq->ready;
> +   struct vdpa_reconnect_info *area;
> +
> +   area = &dev->reconnect_info[index];
> +   struct vhost_reconnect_vring *log_reconnect;
> +
> +   log_reconnect = (struct vhost_reconnect_vring *)area->vaddr;

What if userspace doesn't do mmap()?

Thanks
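
One way to keep the ioctl safe when the page was never set up, e.g. if
the allocation is deferred to mmap()/fault time as suggested for patch
3, is to check the area before dereferencing it. A sketch on top of the
quoted code:

	area = &dev->reconnect_info[index];
	/* only trust the reconnect page if it has actually been allocated */
	if (area->vaddr) {
		struct vhost_reconnect_vring *log_reconnect =
			(struct vhost_reconnect_vring *)area->vaddr;

		if (log_reconnect->last_avail_idx != vq_info.split.avail_index)
			vq_info.split.avail_index = log_reconnect->last_avail_idx;
	}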

> +   if (log_reconnect->last_avail_idx !=
> +   vq_info.split.avail_index) {
> +   vq_info.split.avail_index =
> +   log_reconnect->last_avail_idx;
> +   }
>
> ret = -EFAULT;
> if (copy_to_user(argp, &vq_info, sizeof(vq_info)))
> --
> 2.34.3
>


Re: [RFC PATCHES 00/17] IOMMUFD: Deliver IO page faults to user space

2023-06-28 Thread Jason Gunthorpe
On Wed, Jun 28, 2023 at 10:00:56AM +0800, Baolu Lu wrote:
> > If the driver created a SVA domain then the op should point to some
> > generic 'handle sva fault' function. There shouldn't be weird SVA
> > stuff in the core code.
> > 
> > The weird SVA stuff is really just a generic per-device workqueue
> > dispatcher, so if we think that is valuable then it should be
> > integrated into the iommu_domain (domain->ops->use_iopf_workqueue =
> > true for instance). Then it could route the fault through the
> > workqueue and still invoke domain->ops->iopf_handler.
> > 
> > The word "SVA" should not appear in any of this.
> 
> Yes. We should make it generic. The domain->use_iopf_workqueue flag
> denotes that the page faults of a fault group should be put together and
> then be handled and responded to in a workqueue. Otherwise, the page fault
> is dispatched to domain->iopf_handler directly.

It might be better to have iopf_handler and
iopf_handler_work function pointers to distinguish the two cases.
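
A rough sketch of that shape; the names are illustrative only and this
is not the current struct iommu_domain layout:

struct iommu_domain_example {
	/* fast path: called directly from the fault reporting path */
	enum iommu_page_response_code (*iopf_handler)(struct iommu_fault *fault,
						      void *data);
	/* deferred path: called from the per-device workqueue, may sleep */
	enum iommu_page_response_code (*iopf_handler_work)(struct iommu_fault *fault,
							   void *data);
	void *fault_data;
};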

> > Not sure what iommu_register_device_fault_handler() has to do with all
> > of this.. Setting up the dev_iommu stuff to allow for the workqueue
> > should happen dynamically during domain attach, ideally in the core
> > code before calling to the driver.
> 
> There are two pointers under struct dev_iommu for fault handling.
> 
> /**
>  * struct dev_iommu - Collection of per-device IOMMU data
>  *
>  * @fault_param: IOMMU detected device fault reporting data
>  * @iopf_param:  I/O Page Fault queue and data
> 
> [...]
> 
> struct dev_iommu {
> struct mutex lock;
> struct iommu_fault_param*fault_param;
> struct iopf_device_param*iopf_param;
> 
> My understanding is that @fault_param is a place holder for generic
> things, while @iopf_param is workqueue specific.

Well, let's look:

struct iommu_fault_param {
iommu_dev_fault_handler_t handler;
void *data;

These two make no sense now. handler is always iommu_queue_iopf. Given
our domain centric design we want the function pointer in the domain,
not in the device. So delete it.

struct list_head faults;
struct mutex lock;

Queue of unhandled/unacked faults? Seems sort of reasonable

> @iopf_param could be allocated on demand (perhaps renaming it to a more
> meaningful name?). It happens before a domain with the use_iopf_workqueue
> flag set attaches to a device. iopf_param stays alive until device_release.

Yes

Do this for the iommu_fault_param as well; in fact, probably just put
the two things together in one allocation and allocate it when we attach a
PRI-using domain. I don't think we need to micro-optimize further.
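
For illustration, with made-up names, the combined per-device state
could be a single on-demand allocation along these lines:

/* sketch only: one allocation created when a PRI-using domain attaches */
struct iommu_fault_data_example {
	/* queue of unhandled/unacknowledged faults */
	struct list_head faults;
	struct mutex lock;
	/* workqueue state, only used when faults are handled deferred */
	struct iopf_queue *queue;
};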
 
Jason


Re: [PATCH V3 net-next 1/2] virtio-net: convert rx mode setting to use workqueue

2023-06-28 Thread Michael S. Tsirkin
On Wed, May 31, 2023 at 09:07:25AM +0800, Jason Wang wrote:
> On Mon, May 29, 2023 at 9:21 AM Jason Wang  wrote:
> >
> > On Sun, May 28, 2023 at 7:39 PM Michael S. Tsirkin  wrote:
> > >
> > > On Fri, May 26, 2023 at 09:31:34AM +0800, Jason Wang wrote:
> > > > On Thu, May 25, 2023 at 3:41 PM Michael S. Tsirkin  
> > > > wrote:
> > > > >
> > > > > On Thu, May 25, 2023 at 11:43:34AM +0800, Jason Wang wrote:
> > > > > > On Wed, May 24, 2023 at 5:15 PM Michael S. Tsirkin 
> > > > > >  wrote:
> > > > > > >
> > > > > > > On Wed, May 24, 2023 at 04:18:41PM +0800, Jason Wang wrote:
> > > > > > > > This patch convert rx mode setting to be done in a workqueue, 
> > > > > > > > this is
> > > > > > > > a must for allow to sleep when waiting for the cvq command to
> > > > > > > > response since current code is executed under addr spin lock.
> > > > > > > >
> > > > > > > > Signed-off-by: Jason Wang 
> > > > > > > > ---
> > > > > > > > Changes since V1:
> > > > > > > > - use RTNL to synchronize rx mode worker
> > > > > > > > ---
> > > > > > > >  drivers/net/virtio_net.c | 55 
> > > > > > > > +---
> > > > > > > >  1 file changed, 52 insertions(+), 3 deletions(-)
> > > > > > > >
> > > > > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > > > > > index 56ca1d270304..5d2f1da4eaa0 100644
> > > > > > > > --- a/drivers/net/virtio_net.c
> > > > > > > > +++ b/drivers/net/virtio_net.c
> > > > > > > > @@ -265,6 +265,12 @@ struct virtnet_info {
> > > > > > > >   /* Work struct for config space updates */
> > > > > > > >   struct work_struct config_work;
> > > > > > > >
> > > > > > > > + /* Work struct for config rx mode */
> > > > > > >
> > > > > > > With a bit less abbreviation maybe? setting rx mode?
> > > > > >
> > > > > > That's fine.
> > > > > >
> > > > > > >
> > > > > > > > + struct work_struct rx_mode_work;
> > > > > > > > +
> > > > > > > > + /* Is rx mode work enabled? */
> > > > > > >
> > > > > > > Ugh not a great comment.
> > > > > >
> > > > > > Any suggestions for this. E.g we had:
> > > > > >
> > > > > > /* Is delayed refill enabled? */
> > > > >
> > > > > /* OK to queue work setting RX mode? */
> > > >
> > > > Ok.
> > > >
> > > > >
> > > > >
> > > > > > >
> > > > > > > > + bool rx_mode_work_enabled;
> > > > > > > > +
> > > > > > >
> > > > > > >
> > > > > > >
> > > > > > > >   /* Does the affinity hint is set for virtqueues? */
> > > > > > > >   bool affinity_hint_set;
> > > > > > > >
> > > > > > > > @@ -388,6 +394,20 @@ static void disable_delayed_refill(struct 
> > > > > > > > virtnet_info *vi)
> > > > > > > >   spin_unlock_bh(&vi->refill_lock);
> > > > > > > >  }
> > > > > > > >
> > > > > > > > +static void enable_rx_mode_work(struct virtnet_info *vi)
> > > > > > > > +{
> > > > > > > > + rtnl_lock();
> > > > > > > > + vi->rx_mode_work_enabled = true;
> > > > > > > > + rtnl_unlock();
> > > > > > > > +}
> > > > > > > > +
> > > > > > > > +static void disable_rx_mode_work(struct virtnet_info *vi)
> > > > > > > > +{
> > > > > > > > + rtnl_lock();
> > > > > > > > + vi->rx_mode_work_enabled = false;
> > > > > > > > + rtnl_unlock();
> > > > > > > > +}
> > > > > > > > +
> > > > > > > >  static void virtqueue_napi_schedule(struct napi_struct *napi,
> > > > > > > >   struct virtqueue *vq)
> > > > > > > >  {
> > > > > > > > @@ -2341,9 +2361,11 @@ static int virtnet_close(struct 
> > > > > > > > net_device *dev)
> > > > > > > >   return 0;
> > > > > > > >  }
> > > > > > > >
> > > > > > > > -static void virtnet_set_rx_mode(struct net_device *dev)
> > > > > > > > +static void virtnet_rx_mode_work(struct work_struct *work)
> > > > > > > >  {
> > > > > > > > - struct virtnet_info *vi = netdev_priv(dev);
> > > > > > > > + struct virtnet_info *vi =
> > > > > > > > + container_of(work, struct virtnet_info, 
> > > > > > > > rx_mode_work);
> > > > > > > > + struct net_device *dev = vi->dev;
> > > > > > > >   struct scatterlist sg[2];
> > > > > > > >   struct virtio_net_ctrl_mac *mac_data;
> > > > > > > >   struct netdev_hw_addr *ha;
> > > > > > > > @@ -2356,6 +2378,8 @@ static void virtnet_set_rx_mode(struct 
> > > > > > > > net_device *dev)
> > > > > > > >   if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
> > > > > > > >   return;
> > > > > > > >
> > > > > > > > + rtnl_lock();
> > > > > > > > +
> > > > > > > >   vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
> > > > > > > >   vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
> > > > > > > >
> > > > > > > > @@ -2373,14 +2397,19 @@ static void virtnet_set_rx_mode(struct 
> > > > > > > > net_device *dev)
> > > > > > > >   dev_warn(&dev->dev, "Failed to %sable allmulti 
> > > > > > > > mode.\n",
> > > > > > > >vi->ctrl->allmulti ? "en" : "dis");
> > > > > > > >
> > > > > > > > + netif_addr_lock_bh(dev)

Re: [PATCH V3 net-next 1/2] virtio-net: convert rx mode setting to use workqueue

2023-06-28 Thread Jason Wang
On Wed, Jun 28, 2023 at 9:34 PM Michael S. Tsirkin  wrote:
>
> On Wed, May 31, 2023 at 09:07:25AM +0800, Jason Wang wrote:
> > On Mon, May 29, 2023 at 9:21 AM Jason Wang  wrote:
> > >
> > > On Sun, May 28, 2023 at 7:39 PM Michael S. Tsirkin  
> > > wrote:
> > > >
> > > > On Fri, May 26, 2023 at 09:31:34AM +0800, Jason Wang wrote:
> > > > > On Thu, May 25, 2023 at 3:41 PM Michael S. Tsirkin  
> > > > > wrote:
> > > > > >
> > > > > > On Thu, May 25, 2023 at 11:43:34AM +0800, Jason Wang wrote:
> > > > > > > On Wed, May 24, 2023 at 5:15 PM Michael S. Tsirkin 
> > > > > > >  wrote:
> > > > > > > >
> > > > > > > > On Wed, May 24, 2023 at 04:18:41PM +0800, Jason Wang wrote:
> > > > > > > > > This patch convert rx mode setting to be done in a workqueue, 
> > > > > > > > > this is
> > > > > > > > > a must for allow to sleep when waiting for the cvq command to
> > > > > > > > > response since current code is executed under addr spin lock.
> > > > > > > > >
> > > > > > > > > Signed-off-by: Jason Wang 
> > > > > > > > > ---
> > > > > > > > > Changes since V1:
> > > > > > > > > - use RTNL to synchronize rx mode worker
> > > > > > > > > ---
> > > > > > > > >  drivers/net/virtio_net.c | 55 
> > > > > > > > > +---
> > > > > > > > >  1 file changed, 52 insertions(+), 3 deletions(-)
> > > > > > > > >
> > > > > > > > > diff --git a/drivers/net/virtio_net.c 
> > > > > > > > > b/drivers/net/virtio_net.c
> > > > > > > > > index 56ca1d270304..5d2f1da4eaa0 100644
> > > > > > > > > --- a/drivers/net/virtio_net.c
> > > > > > > > > +++ b/drivers/net/virtio_net.c
> > > > > > > > > @@ -265,6 +265,12 @@ struct virtnet_info {
> > > > > > > > >   /* Work struct for config space updates */
> > > > > > > > >   struct work_struct config_work;
> > > > > > > > >
> > > > > > > > > + /* Work struct for config rx mode */
> > > > > > > >
> > > > > > > > With a bit less abbreviation maybe? setting rx mode?
> > > > > > >
> > > > > > > That's fine.
> > > > > > >
> > > > > > > >
> > > > > > > > > + struct work_struct rx_mode_work;
> > > > > > > > > +
> > > > > > > > > + /* Is rx mode work enabled? */
> > > > > > > >
> > > > > > > > Ugh not a great comment.
> > > > > > >
> > > > > > > Any suggestions for this. E.g we had:
> > > > > > >
> > > > > > > /* Is delayed refill enabled? */
> > > > > >
> > > > > > /* OK to queue work setting RX mode? */
> > > > >
> > > > > Ok.
> > > > >
> > > > > >
> > > > > >
> > > > > > > >
> > > > > > > > > + bool rx_mode_work_enabled;
> > > > > > > > > +
> > > > > > > >
> > > > > > > >
> > > > > > > >
> > > > > > > > >   /* Does the affinity hint is set for virtqueues? */
> > > > > > > > >   bool affinity_hint_set;
> > > > > > > > >
> > > > > > > > > @@ -388,6 +394,20 @@ static void 
> > > > > > > > > disable_delayed_refill(struct virtnet_info *vi)
> > > > > > > > >   spin_unlock_bh(&vi->refill_lock);
> > > > > > > > >  }
> > > > > > > > >
> > > > > > > > > +static void enable_rx_mode_work(struct virtnet_info *vi)
> > > > > > > > > +{
> > > > > > > > > + rtnl_lock();
> > > > > > > > > + vi->rx_mode_work_enabled = true;
> > > > > > > > > + rtnl_unlock();
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > > +static void disable_rx_mode_work(struct virtnet_info *vi)
> > > > > > > > > +{
> > > > > > > > > + rtnl_lock();
> > > > > > > > > + vi->rx_mode_work_enabled = false;
> > > > > > > > > + rtnl_unlock();
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > >  static void virtqueue_napi_schedule(struct napi_struct *napi,
> > > > > > > > >   struct virtqueue *vq)
> > > > > > > > >  {
> > > > > > > > > @@ -2341,9 +2361,11 @@ static int virtnet_close(struct 
> > > > > > > > > net_device *dev)
> > > > > > > > >   return 0;
> > > > > > > > >  }
> > > > > > > > >
> > > > > > > > > -static void virtnet_set_rx_mode(struct net_device *dev)
> > > > > > > > > +static void virtnet_rx_mode_work(struct work_struct *work)
> > > > > > > > >  {
> > > > > > > > > - struct virtnet_info *vi = netdev_priv(dev);
> > > > > > > > > + struct virtnet_info *vi =
> > > > > > > > > + container_of(work, struct virtnet_info, 
> > > > > > > > > rx_mode_work);
> > > > > > > > > + struct net_device *dev = vi->dev;
> > > > > > > > >   struct scatterlist sg[2];
> > > > > > > > >   struct virtio_net_ctrl_mac *mac_data;
> > > > > > > > >   struct netdev_hw_addr *ha;
> > > > > > > > > @@ -2356,6 +2378,8 @@ static void virtnet_set_rx_mode(struct 
> > > > > > > > > net_device *dev)
> > > > > > > > >   if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
> > > > > > > > >   return;
> > > > > > > > >
> > > > > > > > > + rtnl_lock();
> > > > > > > > > +
> > > > > > > > >   vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
> > > > > > > > >   vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
> > > > > > > > >
> > > > > > > > > @@ -2373,14 +239