Hi Chenbo,

Thanks for your reply. My responses are inline.

> -----Original Message-----
> From: Xia, Chenbo <chenbo....@intel.com>
> Sent: Monday, May 23, 2022 11:42 AM
> To: Pei, Andy <andy....@intel.com>; dev@dpdk.org
> Cc: maxime.coque...@redhat.com; Cao, Gang <gang....@intel.com>; Liu,
> Changpeng <changpeng....@intel.com>; Xu, Rosen <rosen...@intel.com>;
> Xiao, QimaiX <qimaix.x...@intel.com>
> Subject: RE: [PATCH v8 01/13] vdpa/ifc: add support for virtio blk device
> 
> Hi Andy,
> 
> > -----Original Message-----
> > From: Pei, Andy <andy....@intel.com>
> > Sent: Wednesday, May 18, 2022 8:14 PM
> > To: dev@dpdk.org
> > Cc: Xia, Chenbo <chenbo....@intel.com>; maxime.coque...@redhat.com;
> > Cao, Gang <gang....@intel.com>; Liu, Changpeng
> > <changpeng....@intel.com>; Xu, Rosen <rosen...@intel.com>; Xiao,
> > QimaiX <qimaix.x...@intel.com>
> > Subject: [PATCH v8 01/13] vdpa/ifc: add support for virtio blk device
> >
> > Re-use the vdpa/ifc code and distinguish blk and net devices by pci_device_id.
> > Blk and net devices are implemented with the proper features and ops.
> >
> > Signed-off-by: Andy Pei <andy....@intel.com>
> > ---
> >  drivers/vdpa/ifc/base/ifcvf.h | 16 +++++++-
> >  drivers/vdpa/ifc/ifcvf_vdpa.c | 91 +++++++++++++++++++++++++++++++++++++++----
> >  2 files changed, 98 insertions(+), 9 deletions(-)
> >
> > diff --git a/drivers/vdpa/ifc/base/ifcvf.h b/drivers/vdpa/ifc/base/ifcvf.h
> > index 573a35f..483d38b 100644
> > --- a/drivers/vdpa/ifc/base/ifcvf.h
> > +++ b/drivers/vdpa/ifc/base/ifcvf.h
> > @@ -5,8 +5,17 @@
> >  #ifndef _IFCVF_H_
> >  #define _IFCVF_H_
> >
> > +#include <linux/virtio_blk.h>
> >  #include "ifcvf_osdep.h"
> >
> > +#define IFCVF_NET  0
> > +#define IFCVF_BLK  1
> > +
> > +/* for BLK */
> > +#define IFCVF_BLK_TRANSITIONAL_DEVICE_ID    0x1001
> > +#define IFCVF_BLK_MODERN_DEVICE_ID          0x1042
> > +#define IFCVF_BLK_DEVICE_ID                 0x0002
> > +
> >  #define IFCVF_VENDOR_ID            0x1AF4
> >  #define IFCVF_DEVICE_ID            0x1041
> >  #define IFCVF_SUBSYS_VENDOR_ID     0x8086
> 
> Let's rename IFCVF_DEVICE_ID to IFCVF_NET_DEVICE_ID as it's only used for
> net now.
> 
Sure, I will do it in the next version.
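Roughly like this in ifcvf.h (just a sketch of the intended rename for v9; the value stays 0x1041 and users of the macro will be updated accordingly):

	-#define IFCVF_DEVICE_ID            0x1041
	+#define IFCVF_NET_DEVICE_ID        0x1041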

> > @@ -126,13 +135,18 @@ struct ifcvf_hw {
> >     u8     notify_region;
> >     u32    notify_off_multiplier;
> >     struct ifcvf_pci_common_cfg *common_cfg;
> > -   struct ifcvf_net_config *dev_cfg;
> > +   union {
> > +           struct ifcvf_net_config *net_cfg;
> > +           struct virtio_blk_config *blk_cfg;
> > +           void *dev_cfg;
> > +   };
> >     u8     *isr;
> >     u16    *notify_base;
> >     u16    *notify_addr[IFCVF_MAX_QUEUES * 2];
> >     u8     *lm_cfg;
> >     struct vring_info vring[IFCVF_MAX_QUEUES * 2];
> >     u8 nr_vring;
> > +   int device_type;
> >     struct ifcvf_pci_mem_resource mem_resource[IFCVF_PCI_MAX_RESOURCE];
> >  };
> >
> > diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c
> > index 9f05595..be0efd3 100644
> > --- a/drivers/vdpa/ifc/ifcvf_vdpa.c
> > +++ b/drivers/vdpa/ifc/ifcvf_vdpa.c
> > @@ -75,6 +75,12 @@ struct internal_list {
> >     struct ifcvf_internal *internal;
> >  };
> >
> > +/* vdpa device info includes device features and device operations. */
> > +struct rte_vdpa_dev_info {
> > +   uint64_t features;
> > +   struct rte_vdpa_dev_ops *ops;
> > +};
> > +
> >  TAILQ_HEAD(internal_list_head, internal_list);
> >
> >  static struct internal_list_head internal_list =
> >     TAILQ_HEAD_INITIALIZER(internal_list);
> > @@ -1167,6 +1173,48 @@ struct internal_list {
> >     return 0;
> >  }
> >
> > +static int16_t
> > +ifcvf_pci_get_device_type(struct rte_pci_device *pci_dev)
> > +{
> > +   uint16_t pci_device_id = pci_dev->id.device_id;
> > +   uint16_t device_id;
> > +
> > +   if (pci_device_id < 0x1000 || pci_device_id > 0x107f) {
> > +           DRV_LOG(ERR, "Probe device is not a virtio device\n");
> > +           return -1;
> > +   }
> > +
> > +   if (pci_device_id < 0x1040) {
> > +           /* Transitional devices: use the PCI subsystem device id as
> > +            * virtio device id, same as legacy driver always did.
> > +            */
> > +           device_id = pci_dev->id.subsystem_device_id;
> > +   } else {
> > +           /* Modern devices: simply use PCI device id,
> > +            * but start from 0x1040.
> > +            */
> > +           device_id = pci_device_id - 0x1040;
> > +   }
> > +
> > +   return device_id;
> > +}
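
(To illustrate the mapping above: a transitional blk VF with PCI device id 0x1001 resolves through its subsystem device id, 0x0002 = VIRTIO_ID_BLOCK, while a modern blk VF with PCI device id 0x1042 resolves to 0x1042 - 0x1040 = 2, which is also VIRTIO_ID_BLOCK.)
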
> > +
> > +struct rte_vdpa_dev_info dev_info[] = {
> > +   {
> > +           .features = (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |
> > +                       (1ULL << VIRTIO_NET_F_CTRL_VQ) |
> > +                       (1ULL << VIRTIO_NET_F_STATUS) |
> > +                       (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) |
> > +                       (1ULL << VHOST_F_LOG_ALL),
> > +           .ops = &ifcvf_ops,
> 
> Rename ifcvf_ops -> ifcvf_net_ops
> 
> Overall the patch LGTM. With above fixed:
> 
> Reviewed-by: Chenbo Xia <chenbo....@intel.com>
> 
OK, I will do this in the next version.
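Roughly (a sketch only, assuming the existing declaration is "static struct rte_vdpa_dev_ops ifcvf_ops" and the rename brings no behavior change):

	-static struct rte_vdpa_dev_ops ifcvf_ops = {
	+static struct rte_vdpa_dev_ops ifcvf_net_ops = {
	...
	-		.ops = &ifcvf_ops,
	+		.ops = &ifcvf_net_ops,
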
> > +   },
> > +   {
> > +           .features = (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) |
> > +                       (1ULL << VHOST_F_LOG_ALL),
> > +           .ops = NULL,
> > +   },
> > +};
> > +
> >  static int
> >  ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
> >             struct rte_pci_device *pci_dev)
> > @@ -1178,6 +1226,7 @@ struct internal_list {
> >     int sw_fallback_lm = 0;
> >     struct rte_kvargs *kvlist = NULL;
> >     int ret = 0;
> > +   int16_t device_id;
> >
> >     if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> >             return 0;
> > @@ -1227,13 +1276,24 @@ struct internal_list {
> >     internal->configured = 0;
> >     internal->max_queues = IFCVF_MAX_QUEUES;
> >     features = ifcvf_get_features(&internal->hw);
> > -   internal->features = (features &
> > -           ~(1ULL << VIRTIO_F_IOMMU_PLATFORM)) |
> > -           (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |
> > -           (1ULL << VIRTIO_NET_F_CTRL_VQ) |
> > -           (1ULL << VIRTIO_NET_F_STATUS) |
> > -           (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) |
> > -           (1ULL << VHOST_F_LOG_ALL);
> > +
> > +   device_id = ifcvf_pci_get_device_type(pci_dev);
> > +   if (device_id < 0) {
> > +           DRV_LOG(ERR, "failed to get device %s type", pci_dev->name);
> > +           goto error;
> > +   }
> > +
> > +   if (device_id == VIRTIO_ID_NET) {
> > +           internal->hw.device_type = IFCVF_NET;
> > +           internal->features = features &
> > +                                   ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
> > +           internal->features |= dev_info[IFCVF_NET].features;
> > +   } else if (device_id == VIRTIO_ID_BLOCK) {
> > +           internal->hw.device_type = IFCVF_BLK;
> > +           internal->features = features &
> > +                                   ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
> > +           internal->features |= dev_info[IFCVF_BLK].features;
> > +   }
> >
> >     list->internal = internal;
> >
> > @@ -1245,7 +1305,8 @@ struct internal_list {
> >     }
> >     internal->sw_lm = sw_fallback_lm;
> >
> > -   internal->vdev = rte_vdpa_register_device(&pci_dev->device, &ifcvf_ops);
> > +   internal->vdev = rte_vdpa_register_device(&pci_dev->device,
> > +                           dev_info[internal->hw.device_type].ops);
> >     if (internal->vdev == NULL) {
> >             DRV_LOG(ERR, "failed to register device %s", pci_dev->name);
> >             goto error;
> > @@ -1313,6 +1374,20 @@ struct internal_list {
> >       .subsystem_device_id = IFCVF_SUBSYS_DEVICE_ID,
> >     },
> >
> > +   { .class_id = RTE_CLASS_ANY_ID,
> > +     .vendor_id = IFCVF_VENDOR_ID,
> > +     .device_id = IFCVF_BLK_TRANSITIONAL_DEVICE_ID,
> > +     .subsystem_vendor_id = IFCVF_SUBSYS_VENDOR_ID,
> > +     .subsystem_device_id = IFCVF_BLK_DEVICE_ID,
> > +   },
> > +
> > +   { .class_id = RTE_CLASS_ANY_ID,
> > +     .vendor_id = IFCVF_VENDOR_ID,
> > +     .device_id = IFCVF_BLK_MODERN_DEVICE_ID,
> > +     .subsystem_vendor_id = IFCVF_SUBSYS_VENDOR_ID,
> > +     .subsystem_device_id = IFCVF_BLK_DEVICE_ID,
> > +   },
> > +
> >     { .vendor_id = 0, /* sentinel */
> >     },
> >  };
> > --
> > 1.8.3.1
